diff --git a/pkg/apis/ark/v1/backup.go b/pkg/apis/ark/v1/backup.go index 16f9075ef..2fe867f09 100644 --- a/pkg/apis/ark/v1/backup.go +++ b/pkg/apis/ark/v1/backup.go @@ -53,8 +53,70 @@ type BackupSpec struct { // IncludeClusterResources specifies whether cluster-scoped resources // should be included for consideration in the backup. IncludeClusterResources *bool `json:"includeClusterResources"` + + // Hooks represent custom behaviors that should be executed at different phases of the backup. + Hooks BackupHooks `json:"hooks"` } +// BackupHooks contains custom behaviors that should be executed at different phases of the backup. +type BackupHooks struct { + // Resources are hooks that should be executed when backing up individual instances of a resource. + Resources []BackupResourceHookSpec `json:"resources"` +} + +// BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on +// the rules defined for namespaces, resources, and label selector. +type BackupResourceHookSpec struct { + // Name is the name of this hook. + Name string `json:"name"` + // IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies + // to all namespaces. + IncludedNamespaces []string `json:"includedNamespaces"` + // ExcludedNamespaces specifies the namespaces to which this hook spec does not apply. + ExcludedNamespaces []string `json:"excludedNamespaces"` + // IncludedResources specifies the resources to which this hook spec applies. If empty, it applies + // to all resources. + IncludedResources []string `json:"includedResources"` + // ExcludedResources specifies the resources to which this hook spec does not apply. + ExcludedResources []string `json:"excludedResources"` + // LabelSelector, if specified, filters the resources to which this hook spec applies. + LabelSelector *metav1.LabelSelector `json:"labelSelector"` + // Hooks is a list of BackupResourceHooks to execute. + Hooks []BackupResourceHook `json:"hooks"` +} + +// BackupResourceHook defines a hook for a resource. +type BackupResourceHook struct { + // Exec defines an exec hook. + Exec *ExecHook `json:"exec"` +} + +// ExecHook is a hook that uses the pod exec API to execute a command in a container in a pod. +type ExecHook struct { + // Container is the container in the pod where the command should be executed. If not specified, + // the pod's first container is used. + Container string `json:"container"` + // Command is the command and arguments to execute. + Command []string `json:"command"` + // OnError specifies how Ark should behave if it encounters an error executing this hook. + OnError HookErrorMode `json:"onError"` + // Timeout defines the maximum amount of time Ark should wait for the hook to complete before + // considering the execution a failure. + Timeout metav1.Duration `json:"timeout"` +} + +// HookErrorMode defines how Ark should treat an error from a hook. +type HookErrorMode string + +const ( + // HookErrorModeContinue means that an error from a hook is acceptable, and the backup can + // proceed. + HookErrorModeContinue HookErrorMode = "Continue" + // HookErrorModeFail means that an error from a hook is problematic, and the backup should be in + // error. + HookErrorModeFail HookErrorMode = "Fail" +) + // BackupPhase is a string representation of the lifecycle phase // of an Ark backup. 
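For illustration only (this example is not part of the diff), the new hook types above could be used to build a Backup that freezes a database filesystem before its pods are backed up. The hook name, namespace, container, and command below are hypothetical:

// Hypothetical usage sketch of the new hooks API; values are illustrative only.
package main

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1 "github.com/heptio/ark/pkg/apis/ark/v1"
)

func exampleBackupWithHooks() *v1.Backup {
	return &v1.Backup{
		Spec: v1.BackupSpec{
			Hooks: v1.BackupHooks{
				Resources: []v1.BackupResourceHookSpec{{
					Name:               "freeze-db",          // hypothetical hook name
					IncludedNamespaces: []string{"database"}, // hypothetical namespace
					IncludedResources:  []string{"pods"},
					Hooks: []v1.BackupResourceHook{{
						Exec: &v1.ExecHook{
							Container: "db", // hypothetical container name
							Command:   []string{"/sbin/fsfreeze", "--freeze", "/var/lib/data"},
							OnError:   v1.HookErrorModeFail,
							Timeout:   metav1.Duration{Duration: 30 * time.Second},
						},
					}},
				}},
			},
		},
	}
}

Here OnError: Fail means an error from the freeze command marks the backup as failed, per the HookErrorMode constants defined above.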
type BackupPhase string diff --git a/pkg/backup/backup.go b/pkg/backup/backup.go index 2bbb7ce16..db2114808 100644 --- a/pkg/backup/backup.go +++ b/pkg/backup/backup.go @@ -19,17 +19,12 @@ package backup import ( "archive/tar" "compress/gzip" - "encoding/json" "fmt" "io" - "path/filepath" - "strings" - "time" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -39,6 +34,7 @@ import ( "github.com/heptio/ark/pkg/client" "github.com/heptio/ark/pkg/discovery" "github.com/heptio/ark/pkg/util/collections" + kubeutil "github.com/heptio/ark/pkg/util/kube" ) // Backupper performs backups. @@ -50,28 +46,27 @@ type Backupper interface { // kubernetesBackupper implements Backupper. type kubernetesBackupper struct { - dynamicFactory client.DynamicFactory - discoveryHelper discovery.Helper - actions map[schema.GroupResource]Action - itemBackupper itemBackupper + dynamicFactory client.DynamicFactory + discoveryHelper discovery.Helper + actions map[schema.GroupResource]Action + podCommandExecutor podCommandExecutor + + groupBackupperFactory groupBackupperFactory } -var _ Backupper = &kubernetesBackupper{} - -// ActionContext contains contextual information for actions. -type ActionContext struct { - logger *logrus.Logger -} - -func (ac ActionContext) infof(msg string, args ...interface{}) { - ac.logger.Infof(msg, args...) +// ResourceIdentifier describes a single item by its group, resource, namespace, and name. +type ResourceIdentifier struct { + schema.GroupResource + Namespace string + Name string } // Action is an actor that performs an operation on an individual item being backed up. type Action interface { - // Execute is invoked on an item being backed up. If an error is returned, the Backup is marked as - // failed. - Execute(ctx *backupContext, item map[string]interface{}, backupper itemBackupper) error + // Execute allows the Action to perform arbitrary logic with the item being backed up and the + // backup itself. Implementations may return additional ResourceIdentifiers that indicate specific + // items that also need to be backed up. + Execute(log *logrus.Entry, item runtime.Unstructured, backup *api.Backup) ([]ResourceIdentifier, error) } type itemKey struct { @@ -89,6 +84,7 @@ func NewKubernetesBackupper( discoveryHelper discovery.Helper, dynamicFactory client.DynamicFactory, actions map[string]Action, + podCommandExecutor podCommandExecutor, ) (Backupper, error) { resolvedActions, err := resolveActions(discoveryHelper, actions) if err != nil { @@ -96,10 +92,12 @@ func NewKubernetesBackupper( } return &kubernetesBackupper{ - discoveryHelper: discoveryHelper, - dynamicFactory: dynamicFactory, - actions: resolvedActions, - itemBackupper: &realItemBackupper{}, + discoveryHelper: discoveryHelper, + dynamicFactory: dynamicFactory, + actions: resolvedActions, + podCommandExecutor: podCommandExecutor, + + groupBackupperFactory: &defaultGroupBackupperFactory{}, }, nil } @@ -122,14 +120,13 @@ func resolveActions(helper discovery.Helper, actions map[string]Action) (map[sch // getResourceIncludesExcludes takes the lists of resources to include and exclude, uses the // discovery helper to resolve them to fully-qualified group-resource names, and returns an // IncludesExcludes list. 
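To illustrate the revised Action signature (this sketch is not part of the diff), a custom action can return additional ResourceIdentifiers for related items it wants included in the backup. The spec.configMapName field and the action itself are hypothetical:

// Hypothetical sketch: an Action that asks Ark to also back up a ConfigMap
// referenced by the item being processed. Illustrative only.
package backup

import (
	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/util/collections"
)

type relatedConfigMapAction struct{}

func (a *relatedConfigMapAction) Execute(log *logrus.Entry, item runtime.Unstructured, backup *api.Backup) ([]ResourceIdentifier, error) {
	obj := item.UnstructuredContent()

	name, err := collections.GetString(obj, "spec.configMapName")
	if err != nil {
		// nothing referenced, so there are no additional items to request
		return nil, nil
	}

	// cluster-scoped items have no namespace, so ignore a lookup error here
	namespace, _ := collections.GetString(obj, "metadata.namespace")

	log.Infof("requesting additional backup of configmap %s/%s", namespace, name)

	return []ResourceIdentifier{{
		GroupResource: schema.GroupResource{Group: "", Resource: "configmaps"},
		Namespace:     namespace,
		Name:          name,
	}}, nil
}

This mirrors how the revised backupPVAction later in the diff requests the PersistentVolume bound to a PersistentVolumeClaim instead of backing it up directly.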
-func (ctx *backupContext) getResourceIncludesExcludes(helper discovery.Helper, includes, excludes []string) *collections.IncludesExcludes { +func getResourceIncludesExcludes(helper discovery.Helper, includes, excludes []string) *collections.IncludesExcludes { resources := collections.GenerateIncludesExcludes( includes, excludes, func(item string) string { gvr, _, err := helper.ResourceFor(schema.ParseGroupResource(item).WithVersion("")) if err != nil { - ctx.infof("Unable to resolve resource %q: %v", item, err) return "" } @@ -138,9 +135,6 @@ func (ctx *backupContext) getResourceIncludesExcludes(helper discovery.Helper, i }, ) - ctx.infof("Including resources: %v", strings.Join(resources.GetIncludes(), ", ")) - ctx.infof("Excluding resources: %v", strings.Join(resources.GetExcludes(), ", ")) - return resources } @@ -150,33 +144,29 @@ func getNamespaceIncludesExcludes(backup *api.Backup) *collections.IncludesExclu return collections.NewIncludesExcludes().Includes(backup.Spec.IncludedNamespaces...).Excludes(backup.Spec.ExcludedNamespaces...) } -type backupContext struct { - backup *api.Backup - w tarWriter - logger *logrus.Logger - namespaceIncludesExcludes *collections.IncludesExcludes - resourceIncludesExcludes *collections.IncludesExcludes - // deploymentsBackedUp marks whether we've seen and are backing up the deployments resource, from - // either the apps or extensions api groups. We only want to back them up once, from whichever api - // group we see first. - deploymentsBackedUp bool - // networkPoliciesBackedUp marks whether we've seen and are backing up the networkpolicies - // resource, from either the networking.k8s.io or extensions api groups. We only want to back them - // up once, from whichever api group we see first. - networkPoliciesBackedUp bool +func getResourceHooks(hookSpecs []api.BackupResourceHookSpec, discoveryHelper discovery.Helper) ([]resourceHook, error) { + resourceHooks := make([]resourceHook, 0, len(hookSpecs)) - actions map[schema.GroupResource]Action + for _, r := range hookSpecs { + h := resourceHook{ + name: r.Name, + namespaces: collections.NewIncludesExcludes().Includes(r.IncludedNamespaces...).Excludes(r.ExcludedNamespaces...), + resources: getResourceIncludesExcludes(discoveryHelper, r.IncludedResources, r.ExcludedResources), + hooks: r.Hooks, + } - // backedUpItems keeps track of items that have been backed up already. - backedUpItems map[itemKey]struct{} + if r.LabelSelector != nil { + labelSelector, err := metav1.LabelSelectorAsSelector(r.LabelSelector) + if err != nil { + return []resourceHook{}, errors.WithStack(err) + } + h.labelSelector = labelSelector + } - dynamicFactory client.DynamicFactory + resourceHooks = append(resourceHooks, h) + } - discoveryHelper discovery.Helper -} - -func (ctx *backupContext) infof(msg string, args ...interface{}) { - ctx.logger.Infof(msg, args...) 
+ return resourceHooks, nil } // Backup backs up the items specified in the Backup, placing them in a gzip-compressed tar file @@ -191,38 +181,64 @@ func (kb *kubernetesBackupper) Backup(backup *api.Backup, backupFile, logFile io gzippedLog := gzip.NewWriter(logFile) defer gzippedLog.Close() - var errs []error + logger := logrus.New() + logger.Out = gzippedLog + log := logger.WithField("backup", kubeutil.NamespaceAndName(backup)) + log.Info("Starting backup") - log := logrus.New() - log.Out = gzippedLog + namespaceIncludesExcludes := getNamespaceIncludesExcludes(backup) + log.Infof("Including namespaces: %s", namespaceIncludesExcludes.IncludesString()) + log.Infof("Excluding namespaces: %s", namespaceIncludesExcludes.ExcludesString()) - ctx := &backupContext{ - backup: backup, - w: tw, - logger: log, - namespaceIncludesExcludes: getNamespaceIncludesExcludes(backup), - backedUpItems: make(map[itemKey]struct{}), - actions: kb.actions, - dynamicFactory: kb.dynamicFactory, - discoveryHelper: kb.discoveryHelper, + resourceIncludesExcludes := getResourceIncludesExcludes(kb.discoveryHelper, backup.Spec.IncludedResources, backup.Spec.ExcludedResources) + log.Infof("Including resources: %s", resourceIncludesExcludes.IncludesString()) + log.Infof("Excluding resources: %s", resourceIncludesExcludes.ExcludesString()) + + resourceHooks, err := getResourceHooks(backup.Spec.Hooks.Resources, kb.discoveryHelper) + if err != nil { + return err } - ctx.infof("Starting backup") + var labelSelector string + if backup.Spec.LabelSelector != nil { + labelSelector = metav1.FormatLabelSelector(backup.Spec.LabelSelector) + } - ctx.resourceIncludesExcludes = ctx.getResourceIncludesExcludes(kb.discoveryHelper, backup.Spec.IncludedResources, backup.Spec.ExcludedResources) + backedUpItems := make(map[itemKey]struct{}) + var errs []error + + cohabitatingResources := map[string]*cohabitatingResource{ + "deployments": newCohabitatingResource("deployments", "extensions", "apps"), + "networkpolicies": newCohabitatingResource("networkpolicies", "extensions", "networking.k8s.io"), + } + + gb := kb.groupBackupperFactory.newGroupBackupper( + log, + backup, + namespaceIncludesExcludes, + resourceIncludesExcludes, + labelSelector, + kb.dynamicFactory, + kb.discoveryHelper, + backedUpItems, + cohabitatingResources, + kb.actions, + kb.podCommandExecutor, + tw, + resourceHooks, + ) for _, group := range kb.discoveryHelper.Resources() { - ctx.infof("Processing group %s", group.GroupVersion) - if err := kb.backupGroup(ctx, group); err != nil { + if err := gb.backupGroup(group); err != nil { errs = append(errs, err) } } - err := kuberrs.NewAggregate(errs) + err = kuberrs.NewAggregate(errs) if err == nil { - ctx.infof("Backup completed successfully") + log.Infof("Backup completed successfully") } else { - ctx.infof("Backup completed with errors: %v", err) + log.Infof("Backup completed with errors: %v", err) } return err @@ -233,272 +249,3 @@ type tarWriter interface { Write([]byte) (int, error) WriteHeader(*tar.Header) error } - -// backupGroup backs up a single API group. 
-func (kb *kubernetesBackupper) backupGroup(ctx *backupContext, group *metav1.APIResourceList) error { - var ( - errs []error - pv *metav1.APIResource - ) - - processResource := func(resource metav1.APIResource) { - ctx.infof("Processing resource %s/%s", group.GroupVersion, resource.Name) - if err := kb.backupResource(ctx, group, resource); err != nil { - errs = append(errs, err) - } - } - - for _, resource := range group.APIResources { - // do PVs last because if we're also backing up PVCs, we want to backup - // PVs within the scope of the PVCs (within the PVC action) to allow - // for hooks to run - if strings.ToLower(resource.Name) == "persistentvolumes" && strings.ToLower(group.GroupVersion) == "v1" { - pvResource := resource - pv = &pvResource - continue - } - processResource(resource) - } - - if pv != nil { - processResource(*pv) - } - - return kuberrs.NewAggregate(errs) -} - -const ( - appsDeploymentsResource = "deployments.apps" - extensionsDeploymentsResource = "deployments.extensions" - networkingNetworkPoliciesResource = "networkpolicies.networking.k8s.io" - extensionsNetworkPoliciesResource = "networkpolicies.extensions" -) - -// backupResource backs up all the objects for a given group-version-resource. -func (kb *kubernetesBackupper) backupResource( - ctx *backupContext, - group *metav1.APIResourceList, - resource metav1.APIResource, -) error { - var errs []error - - gv, err := schema.ParseGroupVersion(group.GroupVersion) - if err != nil { - return errors.Wrapf(err, "error parsing GroupVersion %s", group.GroupVersion) - } - gvr := schema.GroupVersionResource{Group: gv.Group, Version: gv.Version} - gr := schema.GroupResource{Group: gv.Group, Resource: resource.Name} - grString := gr.String() - - switch { - case ctx.backup.Spec.IncludeClusterResources == nil: - // when IncludeClusterResources == nil (auto), only directly - // back up cluster-scoped resources if we're doing a full-cluster - // (all namespaces) backup. Note that in the case of a subset of - // namespaces being backed up, some related cluster-scoped resources - // may still be backed up if triggered by a custom action (e.g. PVC->PV). 
- if !resource.Namespaced && !ctx.namespaceIncludesExcludes.IncludeEverything() { - ctx.infof("Skipping resource %s because it's cluster-scoped and only specific namespaces are included in the backup", grString) - return nil - } - case *ctx.backup.Spec.IncludeClusterResources == false: - if !resource.Namespaced { - ctx.infof("Skipping resource %s because it's cluster-scoped", grString) - return nil - } - case *ctx.backup.Spec.IncludeClusterResources == true: - // include the resource, no action required - } - - if !ctx.resourceIncludesExcludes.ShouldInclude(grString) { - ctx.infof("Resource %s is excluded", grString) - return nil - } - - shouldBackup := func(gr, gr1, gr2 string, backedUp *bool) bool { - // if it's neither of the specified dupe group-resources, back it up - if gr != gr1 && gr != gr2 { - return true - } - - // if it hasn't been backed up yet, back it up - if !*backedUp { - *backedUp = true - return true - } - - // else, don't back it up, and log why - var other string - switch gr { - case gr1: - other = gr2 - case gr2: - other = gr1 - } - - ctx.infof("Skipping resource %q because it's a duplicate of %q", gr, other) - return false - } - - if !shouldBackup(grString, appsDeploymentsResource, extensionsDeploymentsResource, &ctx.deploymentsBackedUp) { - return nil - } - - if !shouldBackup(grString, networkingNetworkPoliciesResource, extensionsNetworkPoliciesResource, &ctx.networkPoliciesBackedUp) { - return nil - } - - var namespacesToList []string - if resource.Namespaced { - namespacesToList = getNamespacesToList(ctx.namespaceIncludesExcludes) - } else { - namespacesToList = []string{""} - } - for _, namespace := range namespacesToList { - resourceClient, err := kb.dynamicFactory.ClientForGroupVersionResource(gvr, resource, namespace) - if err != nil { - return err - } - - labelSelector := "" - if ctx.backup.Spec.LabelSelector != nil { - labelSelector = metav1.FormatLabelSelector(ctx.backup.Spec.LabelSelector) - } - unstructuredList, err := resourceClient.List(metav1.ListOptions{LabelSelector: labelSelector}) - if err != nil { - return errors.WithStack(err) - } - - // do the backup - items, err := meta.ExtractList(unstructuredList) - if err != nil { - return errors.WithStack(err) - } - - for _, item := range items { - unstructured, ok := item.(runtime.Unstructured) - if !ok { - errs = append(errs, errors.Errorf("unexpected type %T", item)) - continue - } - - obj := unstructured.UnstructuredContent() - - if err := kb.itemBackupper.backupItem(ctx, obj, gr); err != nil { - errs = append(errs, err) - } - } - } - - return kuberrs.NewAggregate(errs) -} - -// getNamespacesToList examines ie and resolves the includes and excludes to a full list of -// namespaces to list. If ie is nil or it includes *, the result is just "" (list across all -// namespaces). Otherwise, the result is a list of every included namespace minus all excluded ones. -func getNamespacesToList(ie *collections.IncludesExcludes) []string { - if ie == nil { - return []string{""} - } - - if ie.ShouldInclude("*") { - // "" means all namespaces - return []string{""} - } - - var list []string - for _, i := range ie.GetIncludes() { - if ie.ShouldInclude(i) { - list = append(list, i) - } - } - - return list -} - -type itemBackupper interface { - backupItem(ctx *backupContext, item map[string]interface{}, groupResource schema.GroupResource) error -} - -type realItemBackupper struct{} - -// backupItem backs up an individual item to tarWriter. The item may be excluded based on the -// namespaces IncludesExcludes list. 
-func (ib *realItemBackupper) backupItem(ctx *backupContext, item map[string]interface{}, groupResource schema.GroupResource) error { - name, err := collections.GetString(item, "metadata.name") - if err != nil { - return err - } - - namespace, err := collections.GetString(item, "metadata.namespace") - // a non-nil error is assumed to be due to a cluster-scoped item - if err == nil && !ctx.namespaceIncludesExcludes.ShouldInclude(namespace) { - ctx.infof("Excluding item %s because namespace %s is excluded", name, namespace) - return nil - } - - if namespace == "" && ctx.backup.Spec.IncludeClusterResources != nil && *ctx.backup.Spec.IncludeClusterResources == false { - ctx.infof("Excluding item %s because resource %s is cluster-scoped and IncludeClusterResources is false", name, groupResource.String()) - return nil - } - - if !ctx.resourceIncludesExcludes.ShouldInclude(groupResource.String()) { - ctx.infof("Excluding item %s because resource %s is excluded", name, groupResource.String()) - return nil - } - - key := itemKey{ - resource: groupResource.String(), - namespace: namespace, - name: name, - } - - if _, exists := ctx.backedUpItems[key]; exists { - ctx.infof("Skipping item %s because it's already been backed up.", name) - return nil - } - ctx.backedUpItems[key] = struct{}{} - - // Never save status - delete(item, "status") - - if action, hasAction := ctx.actions[groupResource]; hasAction { - ctx.infof("Executing action on %s, ns=%s, name=%s", groupResource.String(), namespace, name) - - if err := action.Execute(ctx, item, ib); err != nil { - return err - } - } - - ctx.infof("Backing up resource=%s, ns=%s, name=%s", groupResource.String(), namespace, name) - - var filePath string - if namespace != "" { - filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.NamespaceScopedDir, namespace, name+".json") - } else { - filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.ClusterScopedDir, name+".json") - } - - itemBytes, err := json.Marshal(item) - if err != nil { - return errors.WithStack(err) - } - - hdr := &tar.Header{ - Name: filePath, - Size: int64(len(itemBytes)), - Typeflag: tar.TypeReg, - Mode: 0755, - ModTime: time.Now(), - } - - if err := ctx.w.WriteHeader(hdr); err != nil { - return errors.WithStack(err) - } - - if _, err := ctx.w.Write(itemBytes); err != nil { - return errors.WithStack(err) - } - - return nil -} diff --git a/pkg/backup/backup_pv_action.go b/pkg/backup/backup_pv_action.go index 3e9a0f283..41930925b 100644 --- a/pkg/backup/backup_pv_action.go +++ b/pkg/backup/backup_pv_action.go @@ -18,10 +18,12 @@ package backup import ( "github.com/pkg/errors" + "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "github.com/heptio/ark/pkg/apis/ark/v1" "github.com/heptio/ark/pkg/util/collections" ) @@ -30,51 +32,29 @@ import ( type backupPVAction struct { } -var _ Action = &backupPVAction{} - func NewBackupPVAction() Action { return &backupPVAction{} } +var pvGroupResource = schema.GroupResource{Group: "", Resource: "persistentvolumes"} + // Execute finds the PersistentVolume referenced by the provided // PersistentVolumeClaim and backs it up -func (a *backupPVAction) Execute(ctx *backupContext, pvc map[string]interface{}, backupper itemBackupper) error { - pvcName, err := collections.GetString(pvc, "metadata.name") - if err != nil { - ctx.infof("unable to get metadata.name for PersistentVolumeClaim: %v", err) - return err - } +func (a 
*backupPVAction) Execute(log *logrus.Entry, item runtime.Unstructured, backup *v1.Backup) ([]ResourceIdentifier, error) { + log.Info("Executing backupPVAction") + var additionalItems []ResourceIdentifier + + pvc := item.UnstructuredContent() volumeName, err := collections.GetString(pvc, "spec.volumeName") if err != nil { - ctx.infof("unable to get spec.volumeName for PersistentVolumeClaim %s: %v", pvcName, err) - return err + return additionalItems, errors.WithMessage(err, "unable to get spec.volumeName") } - gvr, resource, err := ctx.discoveryHelper.ResourceFor(schema.GroupVersionResource{Resource: "persistentvolumes"}) - if err != nil { - ctx.infof("error getting GroupVersionResource for PersistentVolumes: %v", err) - return err - } - gr := gvr.GroupResource() + additionalItems = append(additionalItems, ResourceIdentifier{ + GroupResource: pvGroupResource, + Name: volumeName, + }) - client, err := ctx.dynamicFactory.ClientForGroupVersionResource(gvr, resource, "") - if err != nil { - ctx.infof("error getting client for GroupVersionResource=%s, Resource=%s: %v", gvr.String(), resource, err) - return err - } - - pv, err := client.Get(volumeName, metav1.GetOptions{}) - if err != nil { - ctx.infof("error getting PersistentVolume %s: %v", volumeName, err) - return errors.WithStack(err) - } - - ctx.infof("backing up PersistentVolume %s for PersistentVolumeClaim %s", volumeName, pvcName) - if err := backupper.backupItem(ctx, pv.UnstructuredContent(), gr); err != nil { - ctx.infof("error backing up PersistentVolume %s: %v", volumeName, err) - return err - } - - return nil + return additionalItems, nil } diff --git a/pkg/backup/backup_pv_action_test.go b/pkg/backup/backup_pv_action_test.go index d3c6c8844..bfca95c1a 100644 --- a/pkg/backup/backup_pv_action_test.go +++ b/pkg/backup/backup_pv_action_test.go @@ -1,5 +1,5 @@ /* -Copyright 2017 Heptio Inc. +Copyright 2017 the Heptio Ark contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -19,77 +19,30 @@ package backup import ( "testing" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - - testutil "github.com/heptio/ark/pkg/util/test" - testlogger "github.com/sirupsen/logrus/hooks/test" + "github.com/heptio/ark/pkg/apis/ark/v1" + arktest "github.com/heptio/ark/pkg/util/test" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) func TestBackupPVAction(t *testing.T) { - tests := []struct { - name string - item map[string]interface{} - volumeName string - expectedErr bool - }{ - { - name: "execute PV backup in normal case", - item: map[string]interface{}{ - "metadata": map[string]interface{}{"name": "pvc-1"}, - "spec": map[string]interface{}{"volumeName": "pv-1"}, - }, - volumeName: "pv-1", - expectedErr: false, - }, - { - name: "error when PVC has no metadata.name", - item: map[string]interface{}{ - "metadata": map[string]interface{}{}, - "spec": map[string]interface{}{"volumeName": "pv-1"}, - }, - expectedErr: true, - }, - { - name: "error when PVC has no spec.volumeName", - item: map[string]interface{}{ - "metadata": map[string]interface{}{"name": "pvc-1"}, - "spec": map[string]interface{}{}, - }, - expectedErr: true, + pvc := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "spec": map[string]interface{}{}, }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var ( - discoveryHelper = testutil.NewFakeDiscoveryHelper(true, nil) - dynamicFactory = &testutil.FakeDynamicFactory{} - dynamicClient = &testutil.FakeDynamicClient{} - testLogger, _ = testlogger.NewNullLogger() - ctx = &backupContext{discoveryHelper: discoveryHelper, dynamicFactory: dynamicFactory, logger: testLogger} - backupper = &fakeItemBackupper{} - action = NewBackupPVAction() - pv = &unstructured.Unstructured{} - pvGVR = schema.GroupVersionResource{Resource: "persistentvolumes"} - ) + backup := &v1.Backup{} - dynamicFactory.On("ClientForGroupVersionResource", - pvGVR, - metav1.APIResource{Name: "persistentvolumes"}, - "", - ).Return(dynamicClient, nil) + a := NewBackupPVAction() - dynamicClient.On("Get", test.volumeName, metav1.GetOptions{}).Return(pv, nil) + additional, err := a.Execute(arktest.NewLogger(), pvc, backup) + assert.EqualError(t, err, "unable to get spec.volumeName: key volumeName not found") - backupper.On("backupItem", ctx, pv.UnstructuredContent(), pvGVR.GroupResource()).Return(nil) - - // method under test - res := action.Execute(ctx, test.item, backupper) - - assert.Equal(t, test.expectedErr, res != nil) - }) - } + pvc.Object["spec"].(map[string]interface{})["volumeName"] = "myVolume" + additional, err = a.Execute(arktest.NewLogger(), pvc, backup) + require.NoError(t, err) + require.Len(t, additional, 1) + assert.Equal(t, ResourceIdentifier{GroupResource: pvGroupResource, Name: "myVolume"}, additional[0]) } diff --git a/pkg/backup/backup_test.go b/pkg/backup/backup_test.go index 0ea9b803b..06fdfbe85 100644 --- a/pkg/backup/backup_test.go +++ b/pkg/backup/backup_test.go @@ -17,19 +17,17 @@ limitations under the License. 
package backup import ( - "archive/tar" "bytes" "compress/gzip" "encoding/json" - "errors" "io" - "io/ioutil" "reflect" "sort" "testing" "time" - testlogger "github.com/sirupsen/logrus/hooks/test" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -37,13 +35,16 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" "github.com/heptio/ark/pkg/apis/ark/v1" + "github.com/heptio/ark/pkg/client" + "github.com/heptio/ark/pkg/discovery" "github.com/heptio/ark/pkg/util/collections" - . "github.com/heptio/ark/pkg/util/test" + kubeutil "github.com/heptio/ark/pkg/util/kube" + arktest "github.com/heptio/ark/pkg/util/test" ) var ( @@ -54,32 +55,22 @@ var ( ) type fakeAction struct { - ids []string - backups []*v1.Backup + ids []string + backups []*v1.Backup + additionalItems []ResourceIdentifier } var _ Action = &fakeAction{} -func (a *fakeAction) Execute(ctx *backupContext, item map[string]interface{}, backupper itemBackupper) error { - metadata, err := collections.GetMap(item, "metadata") +func (a *fakeAction) Execute(log *logrus.Entry, item runtime.Unstructured, backup *v1.Backup) ([]ResourceIdentifier, error) { + metadata, err := meta.Accessor(item) if err != nil { - return err + return a.additionalItems, err } + a.ids = append(a.ids, kubeutil.NamespaceAndName(metadata)) + a.backups = append(a.backups, backup) - var id string - - if v, ok := metadata["namespace"]; ok { - id = v.(string) + "/" - } - - if v, ok := metadata["name"]; ok { - id += v.(string) - } - - a.ids = append(a.ids, id) - a.backups = append(a.backups, ctx.backup) - - return nil + return a.additionalItems, nil } func TestResolveActions(t *testing.T) { @@ -119,7 +110,7 @@ func TestResolveActions(t *testing.T) { schema.GroupVersionResource{Resource: "bar"}: schema.GroupVersionResource{Group: "anothergroup", Resource: "barnacles"}, schema.GroupVersionResource{Resource: "baz"}: schema.GroupVersionResource{Group: "anothergroup", Resource: "bazaars"}, } - discoveryHelper := NewFakeDiscoveryHelper(false, resources) + discoveryHelper := arktest.NewFakeDiscoveryHelper(false, resources) actual, err := resolveActions(discoveryHelper, test.input) gotError := err != nil @@ -131,9 +122,7 @@ func TestResolveActions(t *testing.T) { return } - if e, a := test.expected, actual; !reflect.DeepEqual(e, a) { - t.Errorf("expected %v, got %v", e, a) - } + assert.Equal(t, test.expected, actual) }) } } @@ -190,14 +179,9 @@ func TestGetResourceIncludesExcludes(t *testing.T) { schema.GroupVersionResource{Resource: "bar"}: schema.GroupVersionResource{Group: "anothergroup", Resource: "barnacles"}, schema.GroupVersionResource{Resource: "baz"}: schema.GroupVersionResource{Group: "anothergroup", Resource: "bazaars"}, } - discoveryHelper := NewFakeDiscoveryHelper(false, resources) + discoveryHelper := arktest.NewFakeDiscoveryHelper(false, resources) - log, _ := testlogger.NewNullLogger() - - ctx := &backupContext{ - logger: log, - } - actual := ctx.getResourceIncludesExcludes(discoveryHelper, test.includes, test.excludes) + actual := getResourceIncludesExcludes(discoveryHelper, test.includes, test.excludes) sort.Strings(test.expectedIncludes) actualIncludes := actual.GetIncludes() @@ -243,6 +227,372 @@ func 
TestGetNamespaceIncludesExcludes(t *testing.T) { } } +var ( + v1Group = &metav1.APIResourceList{ + GroupVersion: "v1", + APIResources: []metav1.APIResource{configMapsResource, podsResource}, + } + + configMapsResource = metav1.APIResource{ + Name: "configmaps", + SingularName: "configmap", + Namespaced: true, + Kind: "ConfigMap", + Verbs: metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}), + ShortNames: []string{"cm"}, + Categories: []string{"all"}, + } + + podsResource = metav1.APIResource{ + Name: "pods", + SingularName: "pod", + Namespaced: true, + Kind: "Pod", + Verbs: metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}), + ShortNames: []string{"po"}, + Categories: []string{"all"}, + } + + rbacGroup = &metav1.APIResourceList{ + GroupVersion: "rbac.authorization.k8s.io/v1beta1", + APIResources: []metav1.APIResource{rolesResource}, + } + + rolesResource = metav1.APIResource{ + Name: "roles", + SingularName: "role", + Namespaced: true, + Kind: "Role", + Verbs: metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}), + } + + certificatesGroup = &metav1.APIResourceList{ + GroupVersion: "certificates.k8s.io/v1beta1", + APIResources: []metav1.APIResource{certificateSigningRequestsResource}, + } + + certificateSigningRequestsResource = metav1.APIResource{ + Name: "certificatesigningrequests", + SingularName: "certificatesigningrequest", + Namespaced: false, + Kind: "CertificateSigningRequest", + Verbs: metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}), + ShortNames: []string{"csr"}, + } + + extensionsGroup = &metav1.APIResourceList{ + GroupVersion: "extensions/v1beta1", + APIResources: []metav1.APIResource{deploymentsResource, networkPoliciesResource}, + } + + extensionsGroupVersion = schema.GroupVersion{ + Group: "extensions", + Version: "v1beta1", + } + + appsGroup = &metav1.APIResourceList{ + GroupVersion: "apps/v1beta1", + APIResources: []metav1.APIResource{deploymentsResource}, + } + + appsGroupVersion = schema.GroupVersion{ + Group: "apps", + Version: "v1beta1", + } + + deploymentsResource = metav1.APIResource{ + Name: "deployments", + SingularName: "deployment", + Namespaced: true, + Kind: "Deployment", + Verbs: metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}), + ShortNames: []string{"deploy"}, + Categories: []string{"all"}, + } + + networkingGroup = &metav1.APIResourceList{ + GroupVersion: "networking.k8s.io/v1", + APIResources: []metav1.APIResource{networkPoliciesResource}, + } + + networkingGroupVersion = schema.GroupVersion{ + Group: "networking.k8s.io", + Version: "v1", + } + + networkPoliciesResource = metav1.APIResource{ + Name: "networkpolicies", + SingularName: "networkpolicy", + Namespaced: true, + Kind: "Deployment", + Verbs: metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}), + } +) + +func parseLabelSelectorOrDie(s string) labels.Selector { + ret, err := labels.Parse(s) + if err != nil { + panic(err) + } + return ret +} + +func TestBackup(t *testing.T) { + tests := []struct { + name string + backup *v1.Backup + actions map[string]Action + expectedNamespaces *collections.IncludesExcludes + expectedResources *collections.IncludesExcludes + expectedLabelSelector string + expectedHooks []resourceHook + backupGroupErrors map[*metav1.APIResourceList]error + expectedError error + }{ + { + name: "happy path, no actions, no label selector, no hooks, no errors", + backup: &v1.Backup{ + Spec: v1.BackupSpec{ + // cm - shortcut in legacy 
api group + // csr - shortcut in certificates.k8s.io api group + // roles - fully qualified in rbac.authorization.k8s.io api group + IncludedResources: []string{"cm", "csr", "roles"}, + IncludedNamespaces: []string{"a", "b"}, + ExcludedNamespaces: []string{"c", "d"}, + }, + }, + actions: map[string]Action{}, + expectedNamespaces: collections.NewIncludesExcludes().Includes("a", "b").Excludes("c", "d"), + expectedResources: collections.NewIncludesExcludes().Includes("configmaps", "certificatesigningrequests.certificates.k8s.io", "roles.rbac.authorization.k8s.io"), + expectedHooks: []resourceHook{}, + backupGroupErrors: map[*metav1.APIResourceList]error{ + v1Group: nil, + certificatesGroup: nil, + rbacGroup: nil, + }, + }, + { + name: "label selector", + backup: &v1.Backup{ + Spec: v1.BackupSpec{ + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"a": "b"}, + }, + }, + }, + actions: map[string]Action{}, + expectedNamespaces: collections.NewIncludesExcludes(), + expectedResources: collections.NewIncludesExcludes(), + expectedHooks: []resourceHook{}, + expectedLabelSelector: "a=b", + backupGroupErrors: map[*metav1.APIResourceList]error{ + v1Group: nil, + certificatesGroup: nil, + rbacGroup: nil, + }, + }, + { + name: "backupGroup errors", + backup: &v1.Backup{}, + actions: map[string]Action{}, + expectedNamespaces: collections.NewIncludesExcludes(), + expectedResources: collections.NewIncludesExcludes(), + expectedHooks: []resourceHook{}, + backupGroupErrors: map[*metav1.APIResourceList]error{ + v1Group: errors.New("v1 error"), + certificatesGroup: nil, + rbacGroup: errors.New("rbac error"), + }, + expectedError: errors.New("[v1 error, rbac error]"), + }, + { + name: "hooks", + backup: &v1.Backup{ + Spec: v1.BackupSpec{ + Hooks: v1.BackupHooks{ + Resources: []v1.BackupResourceHookSpec{ + { + Name: "hook1", + IncludedNamespaces: []string{"a"}, + ExcludedNamespaces: []string{"b"}, + IncludedResources: []string{"cm"}, + ExcludedResources: []string{"roles"}, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"1": "2"}, + }, + Hooks: []v1.BackupResourceHook{ + { + Exec: &v1.ExecHook{ + Command: []string{"ls", "/tmp"}, + }, + }, + }, + }, + }, + }, + }, + }, + actions: map[string]Action{}, + expectedNamespaces: collections.NewIncludesExcludes(), + expectedResources: collections.NewIncludesExcludes(), + expectedHooks: []resourceHook{ + { + name: "hook1", + namespaces: collections.NewIncludesExcludes().Includes("a").Excludes("b"), + resources: collections.NewIncludesExcludes().Includes("configmaps").Excludes("roles.rbac.authorization.k8s.io"), + labelSelector: parseLabelSelectorOrDie("1=2"), + hooks: []v1.BackupResourceHook{ + { + Exec: &v1.ExecHook{ + Command: []string{"ls", "/tmp"}, + }, + }, + }, + }, + }, + backupGroupErrors: map[*metav1.APIResourceList]error{ + v1Group: nil, + certificatesGroup: nil, + rbacGroup: nil, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + discoveryHelper := &arktest.FakeDiscoveryHelper{ + Mapper: &arktest.FakeMapper{ + Resources: map[schema.GroupVersionResource]schema.GroupVersionResource{ + schema.GroupVersionResource{Resource: "cm"}: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}, + schema.GroupVersionResource{Resource: "csr"}: schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}, + schema.GroupVersionResource{Resource: "roles"}: schema.GroupVersionResource{Group: 
"rbac.authorization.k8s.io", Version: "v1beta1", Resource: "roles"}, + }, + }, + ResourceList: []*metav1.APIResourceList{ + v1Group, + certificatesGroup, + rbacGroup, + }, + } + + dynamicFactory := &arktest.FakeDynamicFactory{} + + podCommandExecutor := &mockPodCommandExecutor{} + defer podCommandExecutor.AssertExpectations(t) + + b, err := NewKubernetesBackupper( + discoveryHelper, + dynamicFactory, + test.actions, + podCommandExecutor, + ) + require.NoError(t, err) + kb := b.(*kubernetesBackupper) + + groupBackupperFactory := &mockGroupBackupperFactory{} + defer groupBackupperFactory.AssertExpectations(t) + kb.groupBackupperFactory = groupBackupperFactory + + groupBackupper := &mockGroupBackupper{} + defer groupBackupper.AssertExpectations(t) + + cohabitatingResources := map[string]*cohabitatingResource{ + "deployments": newCohabitatingResource("deployments", "extensions", "apps"), + "networkpolicies": newCohabitatingResource("networkpolicies", "extensions", "networking.k8s.io"), + } + + groupBackupperFactory.On("newGroupBackupper", + mock.Anything, // log + test.backup, + test.expectedNamespaces, + test.expectedResources, + test.expectedLabelSelector, + dynamicFactory, + discoveryHelper, + map[itemKey]struct{}{}, // backedUpItems + cohabitatingResources, + kb.actions, + kb.podCommandExecutor, + mock.Anything, // tarWriter + test.expectedHooks, + ).Return(groupBackupper) + + for group, err := range test.backupGroupErrors { + groupBackupper.On("backupGroup", group).Return(err) + } + + var backupFile, logFile bytes.Buffer + + err = b.Backup(test.backup, &backupFile, &logFile) + defer func() { + // print log if anything failed + if t.Failed() { + gzr, err := gzip.NewReader(&logFile) + require.NoError(t, err) + t.Log("Backup log contents:") + var buf bytes.Buffer + _, err = io.Copy(&buf, gzr) + require.NoError(t, err) + require.NoError(t, gzr.Close()) + t.Log(buf.String()) + } + }() + + if test.expectedError != nil { + assert.EqualError(t, err, test.expectedError.Error()) + return + } + assert.NoError(t, err) + }) + } +} + +type mockGroupBackupperFactory struct { + mock.Mock +} + +func (f *mockGroupBackupperFactory) newGroupBackupper( + log *logrus.Entry, + backup *v1.Backup, + namespaces, resources *collections.IncludesExcludes, + labelSelector string, + dynamicFactory client.DynamicFactory, + discoveryHelper discovery.Helper, + backedUpItems map[itemKey]struct{}, + cohabitatingResources map[string]*cohabitatingResource, + actions map[schema.GroupResource]Action, + podCommandExecutor podCommandExecutor, + tarWriter tarWriter, + resourceHooks []resourceHook, +) groupBackupper { + args := f.Called( + log, + backup, + namespaces, + resources, + labelSelector, + dynamicFactory, + discoveryHelper, + backedUpItems, + cohabitatingResources, + actions, + podCommandExecutor, + tarWriter, + resourceHooks, + ) + return args.Get(0).(groupBackupper) +} + +type mockGroupBackupper struct { + mock.Mock +} + +func (gb *mockGroupBackupper) backupGroup(group *metav1.APIResourceList) error { + args := gb.Called(group) + return args.Error(0) +} + +/* func TestBackupMethod(t *testing.T) { // TODO ensure LabelSelector is passed through to the List() calls backup := &v1.Backup{ @@ -293,8 +643,8 @@ func TestBackupMethod(t *testing.T) { ShortNames: []string{"csr"}, } - discoveryHelper := &FakeDiscoveryHelper{ - Mapper: &FakeMapper{ + discoveryHelper := &arktest.FakeDiscoveryHelper{ + Mapper: &arktest.FakeMapper{ Resources: map[schema.GroupVersionResource]schema.GroupVersionResource{ 
schema.GroupVersionResource{Resource: "cm"}: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}, schema.GroupVersionResource{Resource: "csr"}: schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}, @@ -317,11 +667,11 @@ func TestBackupMethod(t *testing.T) { }, } - dynamicFactory := &FakeDynamicFactory{} + dynamicFactory := &arktest.FakeDynamicFactory{} legacyGV := schema.GroupVersionResource{Version: "v1"} - configMapsClientA := &FakeDynamicClient{} + configMapsClientA := &arktest.FakeDynamicClient{} configMapsA := toRuntimeObject(t, `{ "apiVersion": "v1", "kind": "ConfigMapList", @@ -340,7 +690,7 @@ func TestBackupMethod(t *testing.T) { configMapsClientA.On("List", metav1.ListOptions{}).Return(configMapsA, nil) dynamicFactory.On("ClientForGroupVersionResource", legacyGV, configMapsResource, "a").Return(configMapsClientA, nil) - configMapsClientB := &FakeDynamicClient{} + configMapsClientB := &arktest.FakeDynamicClient{} configMapsB := toRuntimeObject(t, `{ "apiVersion": "v1", "kind": "ConfigMapList", @@ -384,7 +734,7 @@ func TestBackupMethod(t *testing.T) { } ] }`) - csrClient := &FakeDynamicClient{} + csrClient := &arktest.FakeDynamicClient{} csrClient.On("List", metav1.ListOptions{}).Return(csrList, nil) dynamicFactory.On("ClientForGroupVersionResource", certificatesGV, certificateSigningRequestsResource, "").Return(csrClient, nil) @@ -416,10 +766,10 @@ func TestBackupMethod(t *testing.T) { rbacGV := schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1"} - rolesClientA := &FakeDynamicClient{} + rolesClientA := &arktest.FakeDynamicClient{} rolesClientA.On("List", metav1.ListOptions{}).Return(roleListA, nil) dynamicFactory.On("ClientForGroupVersionResource", rbacGV, rolesResource, "a").Return(rolesClientA, nil) - rolesClientB := &FakeDynamicClient{} + rolesClientB := &arktest.FakeDynamicClient{} rolesClientB.On("List", metav1.ListOptions{}).Return(roleListB, nil) dynamicFactory.On("ClientForGroupVersionResource", rbacGV, rolesResource, "b").Return(rolesClientB, nil) @@ -431,11 +781,27 @@ func TestBackupMethod(t *testing.T) { "csr": csrAction, } - backupper, err := NewKubernetesBackupper(discoveryHelper, dynamicFactory, actions) + podCommandExecutor := &arktest.PodCommandExecutor{} + defer podCommandExecutor.AssertExpectations(t) + + backupper, err := NewKubernetesBackupper(discoveryHelper, dynamicFactory, actions, podCommandExecutor) require.NoError(t, err) - output := new(bytes.Buffer) - err = backupper.Backup(backup, output, ioutil.Discard) + var output, log bytes.Buffer + err = backupper.Backup(backup, &output, &log) + defer func() { + // print log if anything failed + if t.Failed() { + gzr, err := gzip.NewReader(&log) + require.NoError(t, err) + t.Log("Backup log contents:") + var buf bytes.Buffer + _, err = io.Copy(&buf, gzr) + require.NoError(t, err) + require.NoError(t, gzr.Close()) + t.Log(buf.String()) + } + }() require.NoError(t, err) expectedFiles := sets.NewString( @@ -493,7 +859,7 @@ func TestBackupMethod(t *testing.T) { seenFiles := sets.NewString() - gzipReader, err := gzip.NewReader(output) + gzipReader, err := gzip.NewReader(&output) require.NoError(t, err) defer gzipReader.Close() @@ -543,771 +909,7 @@ func TestBackupMethod(t *testing.T) { // CSRs are not expected because they're unrelated cluster-scoped resources assert.Nil(t, csrAction.ids) } - -func TestBackupResource(t *testing.T) { - tests := []struct { - name string - resourceIncludesExcludes 
*collections.IncludesExcludes - resourceGroup string - resourceVersion string - resourceGV string - resourceName string - resourceNamespaced bool - namespaceIncludesExcludes *collections.IncludesExcludes - expectedListedNamespaces []string - lists []string - labelSelector string - actions map[string]Action - expectedActionIDs map[string][]string - deploymentsBackedUp bool - expectedDeploymentsBackedUp bool - networkPoliciesBackedUp bool - expectedNetworkPoliciesBackedUp bool - includeClusterResources *bool - }{ - { - name: "should not include resource", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("pods"), - resourceGV: "v1", - resourceName: "secrets", - resourceNamespaced: true, - }, - { - name: "should skip deployments.extensions if we've seen deployments.apps", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGV: "extensions/v1beta1", - resourceName: "deployments", - resourceNamespaced: true, - deploymentsBackedUp: true, - expectedDeploymentsBackedUp: true, - }, - { - name: "should skip deployments.apps if we've seen deployments.extensions", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGV: "apps/v1beta1", - resourceName: "deployments", - resourceNamespaced: true, - deploymentsBackedUp: true, - expectedDeploymentsBackedUp: true, - }, - { - name: "should skip networkpolicies.extensions if we've seen networkpolicies.networking.k8s.io", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGV: "extensions/v1beta1", - resourceName: "networkpolicies", - resourceNamespaced: true, - networkPoliciesBackedUp: true, - expectedNetworkPoliciesBackedUp: true, - }, - { - name: "should skip networkpolicies.networking.k8s.io if we've seen networkpolicies.extensions", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGV: "networking.k8s.io/v1", - resourceName: "networkpolicies", - resourceNamespaced: true, - networkPoliciesBackedUp: true, - expectedNetworkPoliciesBackedUp: true, - }, - { - name: "should include deployments.extensions if we haven't seen deployments.apps", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGroup: "extensions", - resourceVersion: "v1beta1", - resourceGV: "extensions/v1beta1", - resourceName: "deployments", - resourceNamespaced: true, - deploymentsBackedUp: false, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - lists: []string{ - `{ - "apiVersion": "extensions/v1beta1", - "kind": "DeploymentList", - "items": [ - { - "metadata": { - "namespace": "a", - "name": "1" - } - } - ] - }`, - }, - expectedListedNamespaces: []string{""}, - expectedDeploymentsBackedUp: true, - }, - { - name: "should include deployments.apps if we haven't seen deployments.extensions", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGroup: "apps", - resourceVersion: "v1beta1", - resourceGV: "apps/v1beta1", - resourceName: "deployments", - resourceNamespaced: true, - deploymentsBackedUp: false, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - lists: []string{ - `{ - "apiVersion": "apps/v1beta1", - "kind": "DeploymentList", - "items": [ - { - "metadata": { - "namespace": "a", - "name": "1" - } - } - ] - }`, - }, - expectedListedNamespaces: []string{""}, - expectedDeploymentsBackedUp: true, - }, - { - name: "should include networkpolicies.extensions if we haven't seen 
networkpolicies.networking.k8s.io", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGroup: "extensions", - resourceVersion: "v1beta1", - resourceGV: "extensions/v1beta1", - resourceName: "networkpolicies", - resourceNamespaced: true, - networkPoliciesBackedUp: false, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - lists: []string{ - `{ - "apiVersion": "extensions/v1beta1", - "kind": "NetworkPolicyList", - "items": [ - { - "metadata": { - "namespace": "a", - "name": "1" - } - } - ] - }`, - }, - expectedListedNamespaces: []string{""}, - expectedNetworkPoliciesBackedUp: true, - }, - { - name: "should include networkpolicies.networking.k8s.io if we haven't seen networkpolicies.extensions", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGroup: "networking.k8s.io", - resourceVersion: "v1", - resourceGV: "networking.k8s.io/v1", - resourceName: "networkpolicies", - resourceNamespaced: true, - networkPoliciesBackedUp: false, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - lists: []string{ - `{ - "apiVersion": "networking.k8s.io/v1", - "kind": "NetworkPolicyList", - "items": [ - { - "metadata": { - "namespace": "a", - "name": "1" - } - } - ] - }`, - }, - expectedListedNamespaces: []string{""}, - expectedNetworkPoliciesBackedUp: true, - }, - { - name: "list per namespace when not including *", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGroup: "apps", - resourceVersion: "v1beta1", - resourceGV: "apps/v1beta1", - resourceName: "deployments", - resourceNamespaced: true, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("a", "b"), - expectedListedNamespaces: []string{"a", "b"}, - lists: []string{ - `{ - "apiVersion": "apps/v1beta1", - "kind": "DeploymentList", - "items": [ - { - "metadata": { - "namespace": "a", - "name": "1" - } - } - ] - }`, - `{ - "apiVersion": "apps/v1beta1v1", - "kind": "DeploymentList", - "items": [ - { - "metadata": { - "namespace": "b", - "name": "2" - } - } - ] - }`, - }, - expectedDeploymentsBackedUp: true, - }, - { - name: "list all namespaces when including *", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGroup: "networking.k8s.io", - resourceVersion: "v1", - resourceGV: "networking.k8s.io/v1", - resourceName: "networkpolicies", - resourceNamespaced: true, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - expectedListedNamespaces: []string{""}, - lists: []string{ - `{ - "apiVersion": "networking.k8s.io/v1", - "kind": "NetworkPolicyList", - "items": [ - { - "metadata": { - "namespace": "a", - "name": "1" - } - } - ] - }`, - }, - expectedNetworkPoliciesBackedUp: true, - }, - { - name: "list all namespaces when cluster-scoped, even with namespace includes", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGroup: "certificates.k8s.io", - resourceVersion: "v1beta1", - resourceGV: "certificates.k8s.io/v1beta1", - resourceName: "certificatesigningrequests", - resourceNamespaced: false, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("a"), - expectedListedNamespaces: []string{""}, - labelSelector: "a=b", - lists: []string{ - `{ - "apiVersion": "certifiaces.k8s.io/v1beta1", - "kind": "CertificateSigningRequestList", - "items": [ - { - "metadata": { - "name": "1", - "labels": { - "a": "b" - } - } - } - ] - }`, - }, - }, - { - name: "use a custom 
action", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGroup: "certificates.k8s.io", - resourceVersion: "v1beta1", - resourceGV: "certificates.k8s.io/v1beta1", - resourceName: "certificatesigningrequests", - resourceNamespaced: false, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("a"), - expectedListedNamespaces: []string{""}, - labelSelector: "a=b", - lists: []string{ - `{ - "apiVersion": "certificates.k8s.io/v1beta1", - "kind": "CertificateSigningRequestList", - "items": [ - { - "metadata": { - "name": "1", - "labels": { - "a": "b" - } - } - } - ] -}`, - }, - actions: map[string]Action{ - "certificatesigningrequests": &fakeAction{}, - "other": &fakeAction{}, - }, - expectedActionIDs: map[string][]string{ - "certificatesigningrequests": {"1"}, - }, - }, - { - name: "should include cluster-scoped resource if backing up subset of namespaces and --include-cluster-resources=true", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGroup: "foogroup", - resourceVersion: "v1", - resourceGV: "foogroup/v1", - resourceName: "bars", - resourceNamespaced: false, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("ns-1"), - includeClusterResources: truePointer, - lists: []string{ - `{ - "apiVersion": "foogroup/v1", - "kind": "BarList", - "items": [ - { - "metadata": { - "namespace": "", - "name": "1" - } - } - ] - }`, - }, - expectedListedNamespaces: []string{""}, - }, - { - name: "should not include cluster-scoped resource if backing up subset of namespaces and --include-cluster-resources=false", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGroup: "foogroup", - resourceVersion: "v1", - resourceGV: "foogroup/v1", - resourceName: "bars", - resourceNamespaced: false, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("ns-1"), - includeClusterResources: falsePointer, - }, - { - name: "should not include cluster-scoped resource if backing up subset of namespaces and --include-cluster-resources=", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGroup: "foogroup", - resourceVersion: "v1", - resourceGV: "foogroup/v1", - resourceName: "bars", - resourceNamespaced: false, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("ns-1"), - includeClusterResources: nil, - }, - { - name: "should include cluster-scoped resources if backing up all namespaces and --include-cluster-resources=true", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGroup: "foogroup", - resourceVersion: "v1", - resourceGV: "foogroup/v1", - resourceName: "bars", - resourceNamespaced: false, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - includeClusterResources: truePointer, - lists: []string{ - `{ - "apiVersion": "foogroup/v1", - "kind": "BarList", - "items": [ - { - "metadata": { - "namespace": "", - "name": "1" - } - } - ] - }`, - }, - expectedListedNamespaces: []string{""}, - }, - { - name: "should not include cluster-scoped resource if backing up all namespaces and --include-cluster-resources=false", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGroup: "foogroup", - resourceVersion: "v1", - resourceGV: "foogroup/v1", - resourceName: "bars", - resourceNamespaced: false, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - includeClusterResources: falsePointer, - 
}, - { - name: "should include cluster-scoped resource if backing up all namespaces and --include-cluster-resources=", - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceGroup: "foogroup", - resourceVersion: "v1", - resourceGV: "foogroup/v1", - resourceName: "bars", - resourceNamespaced: false, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - includeClusterResources: nil, - lists: []string{ - `{ - "apiVersion": "foogroup/v1", - "kind": "BarList", - "items": [ - { - "metadata": { - "namespace": "", - "name": "1" - } - } - ] - }`, - }, - expectedListedNamespaces: []string{""}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var labelSelector *metav1.LabelSelector - if test.labelSelector != "" { - s, err := metav1.ParseToLabelSelector(test.labelSelector) - require.NoError(t, err) - labelSelector = s - } - - log, _ := testlogger.NewNullLogger() - - ctx := &backupContext{ - backup: &v1.Backup{ - Spec: v1.BackupSpec{ - LabelSelector: labelSelector, - IncludeClusterResources: test.includeClusterResources, - }, - }, - resourceIncludesExcludes: test.resourceIncludesExcludes, - namespaceIncludesExcludes: test.namespaceIncludesExcludes, - deploymentsBackedUp: test.deploymentsBackedUp, - networkPoliciesBackedUp: test.networkPoliciesBackedUp, - logger: log, - } - - group := &metav1.APIResourceList{ - GroupVersion: test.resourceGV, - } - - resource := metav1.APIResource{Name: test.resourceName, Namespaced: test.resourceNamespaced} - - itemBackupper := &fakeItemBackupper{} - - var actualActionIDs map[string][]string - - dynamicFactory := &FakeDynamicFactory{} - gvr := schema.GroupVersionResource{Group: test.resourceGroup, Version: test.resourceVersion} - gr := schema.GroupResource{Group: test.resourceGroup, Resource: test.resourceName} - for i, namespace := range test.expectedListedNamespaces { - obj := toRuntimeObject(t, test.lists[i]) - - client := &FakeDynamicClient{} - client.On("List", metav1.ListOptions{LabelSelector: test.labelSelector}).Return(obj, nil) - dynamicFactory.On("ClientForGroupVersionResource", gvr, resource, namespace).Return(client, nil) - - action := test.actions[test.resourceName] - - list, err := meta.ExtractList(obj) - require.NoError(t, err) - for i := range list { - item := list[i].(*unstructured.Unstructured) - itemBackupper.On("backupItem", ctx, item.Object, gr).Return(nil) - if action != nil { - a, err := meta.Accessor(item) - require.NoError(t, err) - ns := a.GetNamespace() - name := a.GetName() - id := ns - if id != "" { - id += "/" - } - id += name - if actualActionIDs == nil { - actualActionIDs = make(map[string][]string) - } - actualActionIDs[test.resourceName] = append(actualActionIDs[test.resourceName], id) - } - } - } - - resources := map[schema.GroupVersionResource]schema.GroupVersionResource{ - schema.GroupVersionResource{Resource: "certificatesigningrequests"}: schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}, - schema.GroupVersionResource{Resource: "other"}: schema.GroupVersionResource{Group: "somegroup", Version: "someversion", Resource: "otherthings"}, - } - discoveryHelper := NewFakeDiscoveryHelper(false, resources) - - kb, err := NewKubernetesBackupper(discoveryHelper, dynamicFactory, test.actions) - require.NoError(t, err) - backupper := kb.(*kubernetesBackupper) - backupper.itemBackupper = itemBackupper - - err = backupper.backupResource(ctx, group, resource) - - assert.Equal(t, 
test.expectedDeploymentsBackedUp, ctx.deploymentsBackedUp) - assert.Equal(t, test.expectedNetworkPoliciesBackedUp, ctx.networkPoliciesBackedUp) - assert.Equal(t, test.expectedActionIDs, actualActionIDs) - }) - } -} - -type fakeItemBackupper struct { - mock.Mock -} - -func (f *fakeItemBackupper) backupItem(ctx *backupContext, obj map[string]interface{}, groupResource schema.GroupResource) error { - args := f.Called(ctx, obj, groupResource) - return args.Error(0) -} - -type fakeTarWriter struct { - closeCalled bool - headers []*tar.Header - data [][]byte - writeHeaderError error - writeError error -} - -func (w *fakeTarWriter) Close() error { return nil } - -func (w *fakeTarWriter) Write(data []byte) (int, error) { - w.data = append(w.data, data) - return 0, w.writeError -} - -func (w *fakeTarWriter) WriteHeader(header *tar.Header) error { - w.headers = append(w.headers, header) - return w.writeHeaderError -} - -func TestBackupItem(t *testing.T) { - tests := []struct { - name string - item string - namespaceIncludesExcludes *collections.IncludesExcludes - resourceIncludesExcludes *collections.IncludesExcludes - includeClusterResources *bool - backedUpItems map[itemKey]struct{} - expectError bool - expectExcluded bool - expectedTarHeaderName string - tarWriteError bool - tarHeaderWriteError bool - customAction bool - expectedActionID string - }{ - { - name: "empty map", - item: "{}", - expectError: true, - }, - { - name: "missing name", - item: `{"metadata":{}}`, - expectError: true, - }, - { - name: "excluded by namespace", - item: `{"metadata":{"namespace":"foo","name":"bar"}}`, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*").Excludes("foo"), - expectError: false, - expectExcluded: true, - }, - { - name: "explicit namespace include", - item: `{"metadata":{"namespace":"foo","name":"bar"}}`, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("foo"), - expectError: false, - expectExcluded: false, - expectedTarHeaderName: "resources/resource.group/namespaces/foo/bar.json", - }, - { - name: "* namespace include", - item: `{"metadata":{"namespace":"foo","name":"bar"}}`, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - expectError: false, - expectExcluded: false, - expectedTarHeaderName: "resources/resource.group/namespaces/foo/bar.json", - }, - { - name: "cluster-scoped", - item: `{"metadata":{"name":"bar"}}`, - expectError: false, - expectExcluded: false, - expectedTarHeaderName: "resources/resource.group/cluster/bar.json", - }, - { - name: "make sure status is deleted", - item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`, - expectError: false, - expectExcluded: false, - expectedTarHeaderName: "resources/resource.group/cluster/bar.json", - }, - { - name: "tar header write error", - item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`, - expectError: true, - tarHeaderWriteError: true, - }, - { - name: "tar write error", - item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`, - expectError: true, - tarWriteError: true, - }, - { - name: "action invoked - cluster-scoped", - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - item: `{"metadata":{"name":"bar"}}`, - expectError: false, - expectExcluded: false, - expectedTarHeaderName: "resources/resource.group/cluster/bar.json", - customAction: true, - expectedActionID: "bar", - }, - { - name: "action invoked - namespaced", - namespaceIncludesExcludes: 
collections.NewIncludesExcludes().Includes("*"), - item: `{"metadata":{"namespace": "myns", "name":"bar"}}`, - expectError: false, - expectExcluded: false, - expectedTarHeaderName: "resources/resource.group/namespaces/myns/bar.json", - customAction: true, - expectedActionID: "myns/bar", - }, - { - name: "cluster-scoped item not backed up when --include-cluster-resources=false", - item: `{"metadata":{"namespace":"","name":"bar"}}`, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - includeClusterResources: falsePointer, - expectError: false, - expectExcluded: true, - }, - { - name: "item not backed up when resource includes/excludes excludes it", - item: `{"metadata":{"namespace":"","name":"bar"}}`, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*").Excludes("resource.group"), - expectError: false, - expectExcluded: true, - }, - { - name: "item not backed up when it's already been backed up", - item: `{"metadata":{"namespace":"","name":"bar"}}`, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - backedUpItems: map[itemKey]struct{}{itemKey{resource: "resource.group", namespace: "", name: "bar"}: struct{}{}}, - expectError: false, - expectExcluded: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - item, err := getAsMap(test.item) - if err != nil { - t.Fatal(err) - } - - namespaces := test.namespaceIncludesExcludes - if namespaces == nil { - namespaces = collections.NewIncludesExcludes() - } - - w := &fakeTarWriter{} - if test.tarHeaderWriteError { - w.writeHeaderError = errors.New("error") - } - if test.tarWriteError { - w.writeError = errors.New("error") - } - - var ( - action *fakeAction - backup = &v1.Backup{Spec: v1.BackupSpec{IncludeClusterResources: test.includeClusterResources}} - groupResource = schema.ParseGroupResource("resource.group") - log, _ = testlogger.NewNullLogger() - ) - - ctx := &backupContext{ - backup: backup, - namespaceIncludesExcludes: namespaces, - w: w, - logger: log, - backedUpItems: make(map[itemKey]struct{}), - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - } - - if test.resourceIncludesExcludes != nil { - ctx.resourceIncludesExcludes = test.resourceIncludesExcludes - } - - if test.backedUpItems != nil { - ctx.backedUpItems = test.backedUpItems - } - - if test.customAction { - action = &fakeAction{} - ctx.actions = map[schema.GroupResource]Action{ - groupResource: action, - } - backup = ctx.backup - } - - b := &realItemBackupper{} - err = b.backupItem(ctx, item, groupResource) - gotError := err != nil - if e, a := test.expectError, gotError; e != a { - t.Fatalf("error: expected %t, got %t", e, a) - } - if test.expectError { - return - } - - if test.expectExcluded { - if len(w.headers) > 0 { - t.Errorf("unexpected header write") - } - if len(w.data) > 0 { - t.Errorf("unexpected data write") - } - return - } - - // we have to delete status as that's what backupItem does, - // and this ensures that we're verifying the right data - delete(item, "status") - itemWithoutStatus, err := json.Marshal(&item) - if err != nil { - t.Fatal(err) - } - - require.Equal(t, 1, len(w.headers), "headers") - assert.Equal(t, test.expectedTarHeaderName, w.headers[0].Name, "header.name") - assert.Equal(t, int64(len(itemWithoutStatus)), w.headers[0].Size, "header.size") - assert.Equal(t, byte(tar.TypeReg), w.headers[0].Typeflag, "header.typeflag") - 
assert.Equal(t, int64(0755), w.headers[0].Mode, "header.mode") - assert.False(t, w.headers[0].ModTime.IsZero(), "header.modTime set") - assert.Equal(t, 1, len(w.data), "# of data") - - actual, err := getAsMap(string(w.data[0])) - if err != nil { - t.Fatal(err) - } - if e, a := item, actual; !reflect.DeepEqual(e, a) { - t.Errorf("data: expected %s, got %s", e, a) - } - - if test.customAction { - if len(action.ids) != 1 { - t.Errorf("unexpected custom action ids: %v", action.ids) - } else if e, a := test.expectedActionID, action.ids[0]; e != a { - t.Errorf("action.ids[0]: expected %s, got %s", e, a) - } - - if len(action.backups) != 1 { - t.Errorf("unexpected custom action backups: %#v", action.backups) - } else if e, a := backup, action.backups[0]; e != a { - t.Errorf("action.backups[0]: expected %#v, got %#v", e, a) - } - } - }) - } -} +*/ func getAsMap(j string) (map[string]interface{}, error) { m := make(map[string]interface{}) @@ -1320,3 +922,11 @@ func toRuntimeObject(t *testing.T, data string) runtime.Object { require.NoError(t, err) return o } + +func unstructuredOrDie(data string) *unstructured.Unstructured { + o, _, err := unstructured.UnstructuredJSONScheme.Decode([]byte(data), nil, nil) + if err != nil { + panic(err) + } + return o.(*unstructured.Unstructured) +} diff --git a/pkg/backup/group_backupper.go b/pkg/backup/group_backupper.go new file mode 100644 index 000000000..877dd38b6 --- /dev/null +++ b/pkg/backup/group_backupper.go @@ -0,0 +1,151 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package backup + +import ( + "strings" + + "github.com/heptio/ark/pkg/apis/ark/v1" + "github.com/heptio/ark/pkg/client" + "github.com/heptio/ark/pkg/discovery" + "github.com/heptio/ark/pkg/util/collections" + "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + kuberrs "k8s.io/apimachinery/pkg/util/errors" +) + +type groupBackupperFactory interface { + newGroupBackupper( + log *logrus.Entry, + backup *v1.Backup, + namespaces, resources *collections.IncludesExcludes, + labelSelector string, + dynamicFactory client.DynamicFactory, + discoveryHelper discovery.Helper, + backedUpItems map[itemKey]struct{}, + cohabitatingResources map[string]*cohabitatingResource, + actions map[schema.GroupResource]Action, + podCommandExecutor podCommandExecutor, + tarWriter tarWriter, + resourceHooks []resourceHook, + ) groupBackupper +} + +type defaultGroupBackupperFactory struct{} + +func (f *defaultGroupBackupperFactory) newGroupBackupper( + log *logrus.Entry, + backup *v1.Backup, + namespaces, resources *collections.IncludesExcludes, + labelSelector string, + dynamicFactory client.DynamicFactory, + discoveryHelper discovery.Helper, + backedUpItems map[itemKey]struct{}, + cohabitatingResources map[string]*cohabitatingResource, + actions map[schema.GroupResource]Action, + podCommandExecutor podCommandExecutor, + tarWriter tarWriter, + resourceHooks []resourceHook, +) groupBackupper { + return &defaultGroupBackupper{ + log: log, + backup: backup, + namespaces: namespaces, + resources: resources, + labelSelector: labelSelector, + dynamicFactory: dynamicFactory, + discoveryHelper: discoveryHelper, + backedUpItems: backedUpItems, + cohabitatingResources: cohabitatingResources, + actions: actions, + podCommandExecutor: podCommandExecutor, + tarWriter: tarWriter, + resourceHooks: resourceHooks, + + resourceBackupperFactory: &defaultResourceBackupperFactory{}, + } +} + +type groupBackupper interface { + backupGroup(group *metav1.APIResourceList) error +} + +type defaultGroupBackupper struct { + log *logrus.Entry + backup *v1.Backup + namespaces, resources *collections.IncludesExcludes + labelSelector string + dynamicFactory client.DynamicFactory + discoveryHelper discovery.Helper + backedUpItems map[itemKey]struct{} + cohabitatingResources map[string]*cohabitatingResource + actions map[schema.GroupResource]Action + podCommandExecutor podCommandExecutor + tarWriter tarWriter + resourceHooks []resourceHook + resourceBackupperFactory resourceBackupperFactory +} + +// backupGroup backs up a single API group. 
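+// Within the core v1 group, persistentvolumes are handled last: if PVCs are also being backed
+// up, this gives the PVC custom action a chance to back up each bound PV first (and lets any
+// hooks run); see the resource loop below.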
+func (gb *defaultGroupBackupper) backupGroup(group *metav1.APIResourceList) error { + var ( + errs []error + pv *metav1.APIResource + log = gb.log.WithField("group", group.GroupVersion) + rb = gb.resourceBackupperFactory.newResourceBackupper( + log, + gb.backup, + gb.namespaces, + gb.resources, + gb.labelSelector, + gb.dynamicFactory, + gb.discoveryHelper, + gb.backedUpItems, + gb.cohabitatingResources, + gb.actions, + gb.podCommandExecutor, + gb.tarWriter, + gb.resourceHooks, + ) + ) + + log.Infof("Backing up group") + + processResource := func(resource metav1.APIResource) { + if err := rb.backupResource(group, resource); err != nil { + errs = append(errs, err) + } + } + + for _, resource := range group.APIResources { + // do PVs last because if we're also backing up PVCs, we want to backup PVs within the scope of + // the PVCs (within the PVC action) to allow for hooks to run + if strings.ToLower(resource.Name) == "persistentvolumes" && strings.ToLower(group.GroupVersion) == "v1" { + pvResource := resource + pv = &pvResource + continue + } + processResource(resource) + } + + if pv != nil { + processResource(*pv) + } + + return kuberrs.NewAggregate(errs) +} diff --git a/pkg/backup/group_backupper_test.go b/pkg/backup/group_backupper_test.go new file mode 100644 index 000000000..a5a57daff --- /dev/null +++ b/pkg/backup/group_backupper_test.go @@ -0,0 +1,182 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package backup + +import ( + "testing" + + "github.com/heptio/ark/pkg/apis/ark/v1" + "github.com/heptio/ark/pkg/client" + "github.com/heptio/ark/pkg/discovery" + "github.com/heptio/ark/pkg/util/collections" + arktest "github.com/heptio/ark/pkg/util/test" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func TestBackupGroup(t *testing.T) { + backup := &v1.Backup{} + + namespaces := collections.NewIncludesExcludes().Includes("a") + resources := collections.NewIncludesExcludes().Includes("b") + labelSelector := "foo=bar" + + dynamicFactory := &arktest.FakeDynamicFactory{} + defer dynamicFactory.AssertExpectations(t) + + discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil) + + backedUpItems := map[itemKey]struct{}{ + {resource: "a", namespace: "b", name: "c"}: struct{}{}, + } + + cohabitatingResources := map[string]*cohabitatingResource{ + "a": { + resource: "a", + groupResource1: schema.GroupResource{Group: "g1", Resource: "a"}, + groupResource2: schema.GroupResource{Group: "g2", Resource: "a"}, + }, + } + + actions := map[schema.GroupResource]Action{ + schema.GroupResource{Group: "", Resource: "pods"}: &fakeAction{}, + } + + podCommandExecutor := &mockPodCommandExecutor{} + defer podCommandExecutor.AssertExpectations(t) + + tarWriter := &fakeTarWriter{} + + resourceHooks := []resourceHook{ + {name: "myhook"}, + } + + gb := (&defaultGroupBackupperFactory{}).newGroupBackupper( + arktest.NewLogger(), + backup, + namespaces, + resources, + labelSelector, + dynamicFactory, + discoveryHelper, + backedUpItems, + cohabitatingResources, + actions, + podCommandExecutor, + tarWriter, + resourceHooks, + ).(*defaultGroupBackupper) + + resourceBackupperFactory := &mockResourceBackupperFactory{} + defer resourceBackupperFactory.AssertExpectations(t) + gb.resourceBackupperFactory = resourceBackupperFactory + + resourceBackupper := &mockResourceBackupper{} + defer resourceBackupper.AssertExpectations(t) + + resourceBackupperFactory.On("newResourceBackupper", + mock.Anything, + backup, + namespaces, + resources, + labelSelector, + dynamicFactory, + discoveryHelper, + backedUpItems, + cohabitatingResources, + actions, + podCommandExecutor, + tarWriter, + resourceHooks, + ).Return(resourceBackupper) + + group := &metav1.APIResourceList{ + GroupVersion: "v1", + APIResources: []metav1.APIResource{ + {Name: "persistentvolumes"}, + {Name: "pods"}, + {Name: "persistentvolumeclaims"}, + }, + } + + expectedOrder := []string{"pods", "persistentvolumeclaims", "persistentvolumes"} + var actualOrder []string + + runFunc := func(args mock.Arguments) { + actualOrder = append(actualOrder, args.Get(1).(metav1.APIResource).Name) + } + + resourceBackupper.On("backupResource", group, metav1.APIResource{Name: "pods"}).Return(nil).Run(runFunc) + resourceBackupper.On("backupResource", group, metav1.APIResource{Name: "persistentvolumeclaims"}).Return(nil).Run(runFunc) + resourceBackupper.On("backupResource", group, metav1.APIResource{Name: "persistentvolumes"}).Return(nil).Run(runFunc) + + err := gb.backupGroup(group) + require.NoError(t, err) + + // make sure PVs were last + assert.Equal(t, expectedOrder, actualOrder) +} + +type mockResourceBackupperFactory struct { + mock.Mock +} + +func (rbf *mockResourceBackupperFactory) newResourceBackupper( + log *logrus.Entry, + backup *v1.Backup, + namespaces 
*collections.IncludesExcludes, + resources *collections.IncludesExcludes, + labelSelector string, + dynamicFactory client.DynamicFactory, + discoveryHelper discovery.Helper, + backedUpItems map[itemKey]struct{}, + cohabitatingResources map[string]*cohabitatingResource, + actions map[schema.GroupResource]Action, + podCommandExecutor podCommandExecutor, + tarWriter tarWriter, + resourceHooks []resourceHook, +) resourceBackupper { + args := rbf.Called( + log, + backup, + namespaces, + resources, + labelSelector, + dynamicFactory, + discoveryHelper, + backedUpItems, + cohabitatingResources, + actions, + podCommandExecutor, + tarWriter, + resourceHooks, + ) + return args.Get(0).(resourceBackupper) +} + +type mockResourceBackupper struct { + mock.Mock +} + +func (rb *mockResourceBackupper) backupResource(group *metav1.APIResourceList, resource metav1.APIResource) error { + args := rb.Called(group, resource) + return args.Error(0) +} diff --git a/pkg/backup/item_backupper.go b/pkg/backup/item_backupper.go new file mode 100644 index 000000000..ac73c5545 --- /dev/null +++ b/pkg/backup/item_backupper.go @@ -0,0 +1,218 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + "archive/tar" + "encoding/json" + "path/filepath" + "time" + + api "github.com/heptio/ark/pkg/apis/ark/v1" + "github.com/heptio/ark/pkg/client" + "github.com/heptio/ark/pkg/discovery" + "github.com/heptio/ark/pkg/util/collections" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type itemBackupperFactory interface { + newItemBackupper( + backup *api.Backup, + namespaces, resources *collections.IncludesExcludes, + backedUpItems map[itemKey]struct{}, + actions map[schema.GroupResource]Action, + podCommandExecutor podCommandExecutor, + tarWriter tarWriter, + resourceHooks []resourceHook, + dynamicFactory client.DynamicFactory, + discoveryHelper discovery.Helper, + ) ItemBackupper +} + +type defaultItemBackupperFactory struct{} + +func (f *defaultItemBackupperFactory) newItemBackupper( + backup *api.Backup, + namespaces, resources *collections.IncludesExcludes, + backedUpItems map[itemKey]struct{}, + actions map[schema.GroupResource]Action, + podCommandExecutor podCommandExecutor, + tarWriter tarWriter, + resourceHooks []resourceHook, + dynamicFactory client.DynamicFactory, + discoveryHelper discovery.Helper, +) ItemBackupper { + ib := &defaultItemBackupper{ + backup: backup, + namespaces: namespaces, + resources: resources, + backedUpItems: backedUpItems, + actions: actions, + tarWriter: tarWriter, + resourceHooks: resourceHooks, + dynamicFactory: dynamicFactory, + discoveryHelper: discoveryHelper, + + itemHookHandler: &defaultItemHookHandler{ + podCommandExecutor: podCommandExecutor, + }, + } + + // this is for testing purposes + ib.additionalItemBackupper = ib + + return ib +} + +type ItemBackupper 
interface { + backupItem(logger *logrus.Entry, obj runtime.Unstructured, groupResource schema.GroupResource) error +} + +type defaultItemBackupper struct { + backup *api.Backup + namespaces *collections.IncludesExcludes + resources *collections.IncludesExcludes + backedUpItems map[itemKey]struct{} + actions map[schema.GroupResource]Action + tarWriter tarWriter + resourceHooks []resourceHook + dynamicFactory client.DynamicFactory + discoveryHelper discovery.Helper + + itemHookHandler itemHookHandler + additionalItemBackupper ItemBackupper +} + +var podsGroupResource = schema.GroupResource{Group: "", Resource: "pods"} + +// backupItem backs up an individual item to tarWriter. The item may be excluded based on the +// namespaces IncludesExcludes list. +func (ib *defaultItemBackupper) backupItem(logger *logrus.Entry, obj runtime.Unstructured, groupResource schema.GroupResource) error { + metadata, err := meta.Accessor(obj) + if err != nil { + return err + } + + namespace := metadata.GetNamespace() + name := metadata.GetName() + + log := logger.WithField("name", name) + if namespace != "" { + log = log.WithField("namespace", namespace) + } + + // NOTE: we have to re-check namespace & resource includes/excludes because it's possible that + // backupItem can be invoked by a custom action. + if namespace != "" && !ib.namespaces.ShouldInclude(namespace) { + log.Info("Excluding item because namespace is excluded") + return nil + } + + if namespace == "" && ib.backup.Spec.IncludeClusterResources != nil && !*ib.backup.Spec.IncludeClusterResources { + log.Info("Excluding item because resource is cluster-scoped and backup.spec.includeClusterResources is false") + return nil + } + + if !ib.resources.ShouldInclude(groupResource.String()) { + log.Info("Excluding item because resource is excluded") + return nil + } + + key := itemKey{ + resource: groupResource.String(), + namespace: namespace, + name: name, + } + + if _, exists := ib.backedUpItems[key]; exists { + log.Info("Skipping item because it's already been backed up.") + return nil + } + ib.backedUpItems[key] = struct{}{} + + log.Info("Backing up resource") + + item := obj.UnstructuredContent() + // Never save status + delete(item, "status") + + if err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.resourceHooks); err != nil { + return err + } + + if action, found := ib.actions[groupResource]; found { + log.Info("Executing custom action") + + if additionalItemIdentifiers, err := action.Execute(log, obj, ib.backup); err == nil { + for _, additionalItem := range additionalItemIdentifiers { + gvr, resource, err := ib.discoveryHelper.ResourceFor(additionalItem.GroupResource.WithVersion("")) + if err != nil { + return err + } + + client, err := ib.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), resource, additionalItem.Namespace) + if err != nil { + return err + } + + additionalItem, err := client.Get(additionalItem.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + ib.additionalItemBackupper.backupItem(log, additionalItem, gvr.GroupResource()) + } + } else { + return errors.Wrap(err, "error executing custom action") + } + } + + var filePath string + if namespace != "" { + filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.NamespaceScopedDir, namespace, name+".json") + } else { + filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.ClusterScopedDir, name+".json") + } + + itemBytes, err := json.Marshal(item) + if err != nil { + return errors.WithStack(err) + } + + hdr := 
&tar.Header{ + Name: filePath, + Size: int64(len(itemBytes)), + Typeflag: tar.TypeReg, + Mode: 0755, + ModTime: time.Now(), + } + + if err := ib.tarWriter.WriteHeader(hdr); err != nil { + return errors.WithStack(err) + } + + if _, err := ib.tarWriter.Write(itemBytes); err != nil { + return errors.WithStack(err) + } + + return nil +} diff --git a/pkg/backup/item_backupper_test.go b/pkg/backup/item_backupper_test.go new file mode 100644 index 000000000..240658467 --- /dev/null +++ b/pkg/backup/item_backupper_test.go @@ -0,0 +1,384 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + "archive/tar" + "encoding/json" + "fmt" + "reflect" + "testing" + + "github.com/heptio/ark/pkg/apis/ark/v1" + "github.com/heptio/ark/pkg/util/collections" + arktest "github.com/heptio/ark/pkg/util/test" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func TestBackupItemSkips(t *testing.T) { + tests := []struct { + testName string + namespace string + name string + namespaces *collections.IncludesExcludes + groupResource schema.GroupResource + resources *collections.IncludesExcludes + backedUpItems map[itemKey]struct{} + }{ + { + testName: "namespace not in includes list", + namespace: "ns", + name: "foo", + namespaces: collections.NewIncludesExcludes().Includes("a"), + }, + { + testName: "namespace in excludes list", + namespace: "ns", + name: "foo", + namespaces: collections.NewIncludesExcludes().Excludes("ns"), + }, + { + testName: "resource not in includes list", + namespace: "ns", + name: "foo", + groupResource: schema.GroupResource{Group: "foo", Resource: "bar"}, + namespaces: collections.NewIncludesExcludes(), + resources: collections.NewIncludesExcludes().Includes("a.b"), + }, + { + testName: "resource in excludes list", + namespace: "ns", + name: "foo", + groupResource: schema.GroupResource{Group: "foo", Resource: "bar"}, + namespaces: collections.NewIncludesExcludes(), + resources: collections.NewIncludesExcludes().Excludes("bar.foo"), + }, + { + testName: "resource already backed up", + namespace: "ns", + name: "foo", + groupResource: schema.GroupResource{Group: "foo", Resource: "bar"}, + namespaces: collections.NewIncludesExcludes(), + resources: collections.NewIncludesExcludes(), + backedUpItems: map[itemKey]struct{}{ + {resource: "bar.foo", namespace: "ns", name: "foo"}: struct{}{}, + }, + }, + } + + for _, test := range tests { + t.Run(test.testName, func(t *testing.T) { + ib := &defaultItemBackupper{ + namespaces: test.namespaces, + resources: test.resources, + backedUpItems: test.backedUpItems, + } + + u := unstructuredOrDie(fmt.Sprintf(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"%s","name":"%s"}}`, test.namespace, 
test.name)) + err := ib.backupItem(arktest.NewLogger(), u, test.groupResource) + assert.NoError(t, err) + }) + } +} + +func TestBackupItemNoSkips(t *testing.T) { + tests := []struct { + name string + item string + namespaceIncludesExcludes *collections.IncludesExcludes + expectError bool + expectExcluded bool + expectedTarHeaderName string + tarWriteError bool + tarHeaderWriteError bool + customAction bool + expectedActionID string + customActionAdditionalItemIdentifiers []ResourceIdentifier + customActionAdditionalItems []runtime.Unstructured + }{ + { + name: "explicit namespace include", + item: `{"metadata":{"namespace":"foo","name":"bar"}}`, + namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("foo"), + expectError: false, + expectExcluded: false, + expectedTarHeaderName: "resources/resource.group/namespaces/foo/bar.json", + }, + { + name: "* namespace include", + item: `{"metadata":{"namespace":"foo","name":"bar"}}`, + namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), + expectError: false, + expectExcluded: false, + expectedTarHeaderName: "resources/resource.group/namespaces/foo/bar.json", + }, + { + name: "cluster-scoped", + item: `{"metadata":{"name":"bar"}}`, + expectError: false, + expectExcluded: false, + expectedTarHeaderName: "resources/resource.group/cluster/bar.json", + }, + { + name: "make sure status is deleted", + item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`, + expectError: false, + expectExcluded: false, + expectedTarHeaderName: "resources/resource.group/cluster/bar.json", + }, + { + name: "tar header write error", + item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`, + expectError: true, + tarHeaderWriteError: true, + }, + { + name: "tar write error", + item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`, + expectError: true, + tarWriteError: true, + }, + { + name: "action invoked - cluster-scoped", + namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), + item: `{"metadata":{"name":"bar"}}`, + expectError: false, + expectExcluded: false, + expectedTarHeaderName: "resources/resource.group/cluster/bar.json", + customAction: true, + expectedActionID: "bar", + }, + { + name: "action invoked - namespaced", + namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), + item: `{"metadata":{"namespace": "myns", "name":"bar"}}`, + expectError: false, + expectExcluded: false, + expectedTarHeaderName: "resources/resource.group/namespaces/myns/bar.json", + customAction: true, + expectedActionID: "myns/bar", + }, + { + name: "action invoked - additional items", + namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), + item: `{"metadata":{"namespace": "myns", "name":"bar"}}`, + expectError: false, + expectExcluded: false, + expectedTarHeaderName: "resources/resource.group/namespaces/myns/bar.json", + customAction: true, + expectedActionID: "myns/bar", + customActionAdditionalItemIdentifiers: []ResourceIdentifier{ + { + GroupResource: schema.GroupResource{Group: "g1", Resource: "r1"}, + Namespace: "ns1", + Name: "n1", + }, + { + GroupResource: schema.GroupResource{Group: "g2", Resource: "r2"}, + Namespace: "ns2", + Name: "n2", + }, + }, + customActionAdditionalItems: []runtime.Unstructured{ + unstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`), + 
unstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var ( + actions map[schema.GroupResource]Action + action *fakeAction + backup = &v1.Backup{} + groupResource = schema.ParseGroupResource("resource.group") + backedUpItems = make(map[itemKey]struct{}) + resources = collections.NewIncludesExcludes() + w = &fakeTarWriter{} + ) + + item, err := getAsMap(test.item) + if err != nil { + t.Fatal(err) + } + + namespaces := test.namespaceIncludesExcludes + if namespaces == nil { + namespaces = collections.NewIncludesExcludes() + } + + if test.tarHeaderWriteError { + w.writeHeaderError = errors.New("error") + } + if test.tarWriteError { + w.writeError = errors.New("error") + } + + if test.customAction { + action = &fakeAction{ + additionalItems: test.customActionAdditionalItemIdentifiers, + } + actions = map[schema.GroupResource]Action{ + groupResource: action, + } + } + + resourceHooks := []resourceHook{} + + podCommandExecutor := &mockPodCommandExecutor{} + defer podCommandExecutor.AssertExpectations(t) + + dynamicFactory := &arktest.FakeDynamicFactory{} + defer dynamicFactory.AssertExpectations(t) + + discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil) + + b := (&defaultItemBackupperFactory{}).newItemBackupper( + backup, + namespaces, + resources, + backedUpItems, + actions, + podCommandExecutor, + w, + resourceHooks, + dynamicFactory, + discoveryHelper, + ).(*defaultItemBackupper) + + // make sure the podCommandExecutor was set correctly in the real hook handler + assert.Equal(t, podCommandExecutor, b.itemHookHandler.(*defaultItemHookHandler).podCommandExecutor) + + itemHookHandler := &mockItemHookHandler{} + defer itemHookHandler.AssertExpectations(t) + b.itemHookHandler = itemHookHandler + + additionalItemBackupper := &mockItemBackupper{} + defer additionalItemBackupper.AssertExpectations(t) + b.additionalItemBackupper = additionalItemBackupper + + obj := &unstructured.Unstructured{Object: item} + itemHookHandler.On("handleHooks", mock.Anything, groupResource, obj, resourceHooks).Return(nil) + + for i, item := range test.customActionAdditionalItemIdentifiers { + itemClient := &arktest.FakeDynamicClient{} + defer itemClient.AssertExpectations(t) + + dynamicFactory.On("ClientForGroupVersionResource", item.GroupResource.WithVersion("").GroupVersion(), metav1.APIResource{Name: item.Resource}, item.Namespace).Return(itemClient, nil) + + itemClient.On("Get", item.Name, metav1.GetOptions{}).Return(test.customActionAdditionalItems[i], nil) + + additionalItemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), test.customActionAdditionalItems[i], item.GroupResource).Return(nil) + } + + err = b.backupItem(arktest.NewLogger(), obj, groupResource) + gotError := err != nil + if e, a := test.expectError, gotError; e != a { + t.Fatalf("error: expected %t, got %t", e, a) + } + if test.expectError { + return + } + + if test.expectExcluded { + if len(w.headers) > 0 { + t.Errorf("unexpected header write") + } + if len(w.data) > 0 { + t.Errorf("unexpected data write") + } + return + } + + // we have to delete status as that's what backupItem does, + // and this ensures that we're verifying the right data + delete(item, "status") + itemWithoutStatus, err := json.Marshal(&item) + if err != nil { + t.Fatal(err) + } + + require.Equal(t, 1, len(w.headers), "headers") + assert.Equal(t, test.expectedTarHeaderName, w.headers[0].Name, "header.name") + assert.Equal(t, 
int64(len(itemWithoutStatus)), w.headers[0].Size, "header.size") + assert.Equal(t, byte(tar.TypeReg), w.headers[0].Typeflag, "header.typeflag") + assert.Equal(t, int64(0755), w.headers[0].Mode, "header.mode") + assert.False(t, w.headers[0].ModTime.IsZero(), "header.modTime set") + assert.Equal(t, 1, len(w.data), "# of data") + + actual, err := getAsMap(string(w.data[0])) + if err != nil { + t.Fatal(err) + } + if e, a := item, actual; !reflect.DeepEqual(e, a) { + t.Errorf("data: expected %s, got %s", e, a) + } + + if test.customAction { + if len(action.ids) != 1 { + t.Errorf("unexpected custom action ids: %v", action.ids) + } else if e, a := test.expectedActionID, action.ids[0]; e != a { + t.Errorf("action.ids[0]: expected %s, got %s", e, a) + } + + if len(action.backups) != 1 { + t.Errorf("unexpected custom action backups: %#v", action.backups) + } else if e, a := backup, action.backups[0]; e != a { + t.Errorf("action.backups[0]: expected %#v, got %#v", e, a) + } + } + }) + } +} + +type fakeTarWriter struct { + closeCalled bool + headers []*tar.Header + data [][]byte + writeHeaderError error + writeError error +} + +func (w *fakeTarWriter) Close() error { return nil } + +func (w *fakeTarWriter) Write(data []byte) (int, error) { + w.data = append(w.data, data) + return 0, w.writeError +} + +func (w *fakeTarWriter) WriteHeader(header *tar.Header) error { + w.headers = append(w.headers, header) + return w.writeHeaderError +} + +type mockItemBackupper struct { + mock.Mock +} + +func (ib *mockItemBackupper) backupItem(logger *logrus.Entry, obj runtime.Unstructured, groupResource schema.GroupResource) error { + args := ib.Called(logger, obj, groupResource) + return args.Error(0) +} diff --git a/pkg/backup/item_hook_handler.go b/pkg/backup/item_hook_handler.go new file mode 100644 index 000000000..b9a047681 --- /dev/null +++ b/pkg/backup/item_hook_handler.go @@ -0,0 +1,186 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + "encoding/json" + "time" + + api "github.com/heptio/ark/pkg/apis/ark/v1" + "github.com/heptio/ark/pkg/util/collections" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// itemHookHandler invokes hooks for an item. +type itemHookHandler interface { + // handleHooks invokes hooks for an item. If the item is a pod and the appropriate annotations exist + // to specify a hook, that is executed. Otherwise, this looks at the backup context's Backup to + // determine if there are any hooks relevant to the item, taking into account the hook spec's + // namespaces, resources, and label selector. + handleHooks(log *logrus.Entry, groupResource schema.GroupResource, obj runtime.Unstructured, resourceHooks []resourceHook) error +} + +// defaultItemHookHandler is the default itemHookHandler. 
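+// It runs exec hooks through the wrapped podCommandExecutor (i.e. the pod exec API), and a
+// failed hook aborts the item's backup only when that hook's OnError mode is Fail.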
+type defaultItemHookHandler struct { + podCommandExecutor podCommandExecutor +} + +func (h *defaultItemHookHandler) handleHooks( + log *logrus.Entry, + groupResource schema.GroupResource, + obj runtime.Unstructured, + resourceHooks []resourceHook, +) error { + // We only support hooks on pods right now + if groupResource != podsGroupResource { + return nil + } + + metadata, err := meta.Accessor(obj) + if err != nil { + return errors.Wrap(err, "unable to get a metadata accessor") + } + + namespace := metadata.GetNamespace() + name := metadata.GetName() + + // If the pod has the hook specified via annotations, that takes priority. + if hookFromAnnotations := getPodExecHookFromAnnotations(metadata.GetAnnotations()); hookFromAnnotations != nil { + hookLog := log.WithFields( + logrus.Fields{ + "hookSource": "annotation", + "hookType": "exec", + }, + ) + if err := h.podCommandExecutor.executePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, "", hookFromAnnotations); err != nil { + hookLog.WithError(err).Error("Error executing hook") + if hookFromAnnotations.OnError == api.HookErrorModeFail { + return err + } + } + + return nil + } + + labels := labels.Set(metadata.GetLabels()) + // Otherwise, check for hooks defined in the backup spec. + for _, resourceHook := range resourceHooks { + if !resourceHook.applicableTo(groupResource, namespace, labels) { + continue + } + + for _, hook := range resourceHook.hooks { + if groupResource == podsGroupResource { + if hook.Exec != nil { + hookLog := log.WithFields( + logrus.Fields{ + "hookSource": "backupSpec", + "hookType": "exec", + }, + ) + err := h.podCommandExecutor.executePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, resourceHook.name, hook.Exec) + if err != nil { + hookLog.WithError(err).Error("Error executing hook") + if hook.Exec.OnError == api.HookErrorModeFail { + return err + } + } + } + } + } + } + + return nil +} + +const ( + podBackupHookContainerAnnotationKey = "hook.backup.ark.heptio.com/container" + podBackupHookCommandAnnotationKey = "hook.backup.ark.heptio.com/command" + podBackupHookOnErrorAnnotationKey = "hook.backup.ark.heptio.com/on-error" + podBackupHookTimeoutAnnotationKey = "hook.backup.ark.heptio.com/timeout" + defaultHookOnError = api.HookErrorModeFail + defaultHookTimeout = 30 * time.Second +) + +// getPodExecHookFromAnnotations returns an ExecHook based on the annotations, as long as the +// 'command' annotation is present. If it is absent, this returns nil. 
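+//
+// For example, a pod could carry annotations like the following (the values here are purely
+// illustrative):
+//
+//	hook.backup.ark.heptio.com/container: web
+//	hook.backup.ark.heptio.com/command: '["/usr/bin/foo", "--flag"]'
+//	hook.backup.ark.heptio.com/on-error: Continue
+//	hook.backup.ark.heptio.com/timeout: 30s
+//
+// The command may be a plain string or a JSON array of strings; on-error is recognized only as
+// Continue or Fail; and the timeout is parsed with time.ParseDuration (invalid values are
+// ignored).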
+func getPodExecHookFromAnnotations(annotations map[string]string) *api.ExecHook { + container := annotations[podBackupHookContainerAnnotationKey] + + commandValue, ok := annotations[podBackupHookCommandAnnotationKey] + if !ok { + return nil + } + var command []string + // check for json array + if commandValue[0] == '[' { + if err := json.Unmarshal([]byte(commandValue), &command); err != nil { + command = []string{commandValue} + } + } else { + command = append(command, commandValue) + } + + onError := api.HookErrorMode(annotations[podBackupHookOnErrorAnnotationKey]) + if onError != api.HookErrorModeContinue && onError != api.HookErrorModeFail { + onError = "" + } + + var timeout time.Duration + timeoutString := annotations[podBackupHookTimeoutAnnotationKey] + if timeoutString != "" { + if temp, err := time.ParseDuration(timeoutString); err == nil { + timeout = temp + } else { + // TODO: log error that we couldn't parse duration + } + } + + return &api.ExecHook{ + Container: container, + Command: command, + OnError: onError, + Timeout: metav1.Duration{Duration: timeout}, + } +} + +type resourceHook struct { + name string + namespaces *collections.IncludesExcludes + resources *collections.IncludesExcludes + labelSelector labels.Selector + hooks []api.BackupResourceHook +} + +func (r resourceHook) applicableTo(groupResource schema.GroupResource, namespace string, labels labels.Set) bool { + if r.namespaces != nil && !r.namespaces.ShouldInclude(namespace) { + return false + } + if r.resources != nil && !r.resources.ShouldInclude(groupResource.String()) { + return false + } + if r.labelSelector != nil && !r.labelSelector.Matches(labels) { + return false + } + return true +} diff --git a/pkg/backup/item_hook_handler_test.go b/pkg/backup/item_hook_handler_test.go new file mode 100644 index 000000000..98f1d70c3 --- /dev/null +++ b/pkg/backup/item_hook_handler_test.go @@ -0,0 +1,583 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package backup + +import ( + "testing" + "time" + + "github.com/heptio/ark/pkg/apis/ark/v1" + "github.com/heptio/ark/pkg/util/collections" + arktest "github.com/heptio/ark/pkg/util/test" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type mockItemHookHandler struct { + mock.Mock +} + +func (h *mockItemHookHandler) handleHooks(log *logrus.Entry, groupResource schema.GroupResource, obj runtime.Unstructured, resourceHooks []resourceHook) error { + args := h.Called(log, groupResource, obj, resourceHooks) + return args.Error(0) +} + +func TestHandleHooksSkips(t *testing.T) { + tests := []struct { + name string + groupResource string + item runtime.Unstructured + hooks []resourceHook + }{ + { + name: "not a pod", + groupResource: "widget.group", + }, + { + name: "pod without annotation / no spec hooks", + item: unstructuredOrDie( + ` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "foo" + } + } + `, + ), + }, + { + name: "spec hooks not applicable", + groupResource: "pods", + item: unstructuredOrDie( + ` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "foo", + "labels": { + "color": "blue" + } + } + } + `, + ), + hooks: []resourceHook{ + { + name: "ns exclude", + namespaces: collections.NewIncludesExcludes().Excludes("ns"), + }, + { + name: "resource exclude", + resources: collections.NewIncludesExcludes().Includes("widgets.group"), + }, + { + name: "label selector mismatch", + labelSelector: parseLabelSelectorOrDie("color=green"), + }, + { + name: "missing exec hook", + hooks: []v1.BackupResourceHook{ + {}, + {}, + }, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + podCommandExecutor := &mockPodCommandExecutor{} + defer podCommandExecutor.AssertExpectations(t) + + h := &defaultItemHookHandler{ + podCommandExecutor: podCommandExecutor, + } + + groupResource := schema.ParseGroupResource(test.groupResource) + err := h.handleHooks(arktest.NewLogger(), groupResource, test.item, test.hooks) + assert.NoError(t, err) + }) + } +} + +func TestHandleHooksPodFromPodAnnotation(t *testing.T) { + tests := []struct { + name string + groupResource string + item runtime.Unstructured + hooks []resourceHook + hookErrorsByContainer map[string]error + expectedError error + expectedPodHook *v1.ExecHook + expectedPodHookError error + }{ + { + name: "pod, no annotation, spec (multiple hooks) = run spec", + groupResource: "pods", + item: unstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name" + } + }`), + hooks: []resourceHook{ + { + name: "hook1", + hooks: []v1.BackupResourceHook{ + { + Exec: &v1.ExecHook{ + Container: "1a", + Command: []string{"1a"}, + }, + }, + { + Exec: &v1.ExecHook{ + Container: "1b", + Command: []string{"1b"}, + }, + }, + }, + }, + { + name: "hook2", + hooks: []v1.BackupResourceHook{ + { + Exec: &v1.ExecHook{ + Container: "2a", + Command: []string{"2a"}, + }, + }, + { + Exec: &v1.ExecHook{ + Container: "2b", + Command: []string{"2b"}, + }, + }, + }, + }, + }, + }, + { + name: "pod, annotation, no spec = run annotation", + groupResource: "pods", + item: unstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", 
+ "metadata": { + "namespace": "ns", + "name": "name", + "annotations": { + "hook.backup.ark.heptio.com/container": "c", + "hook.backup.ark.heptio.com/command": "/bin/ls" + } + } + }`), + expectedPodHook: &v1.ExecHook{ + Container: "c", + Command: []string{"/bin/ls"}, + }, + }, + { + name: "pod, annotation & spec = run annotation", + groupResource: "pods", + item: unstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name", + "annotations": { + "hook.backup.ark.heptio.com/container": "c", + "hook.backup.ark.heptio.com/command": "/bin/ls" + } + } + }`), + expectedPodHook: &v1.ExecHook{ + Container: "c", + Command: []string{"/bin/ls"}, + }, + hooks: []resourceHook{ + { + name: "hook1", + hooks: []v1.BackupResourceHook{ + { + Exec: &v1.ExecHook{ + Container: "1a", + Command: []string{"1a"}, + }, + }, + }, + }, + }, + }, + { + name: "pod, annotation, onError=fail = return error", + groupResource: "pods", + item: unstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name", + "annotations": { + "hook.backup.ark.heptio.com/container": "c", + "hook.backup.ark.heptio.com/command": "/bin/ls", + "hook.backup.ark.heptio.com/on-error": "Fail" + } + } + }`), + expectedPodHook: &v1.ExecHook{ + Container: "c", + Command: []string{"/bin/ls"}, + OnError: v1.HookErrorModeFail, + }, + expectedPodHookError: errors.New("pod hook error"), + expectedError: errors.New("pod hook error"), + }, + { + name: "pod, annotation, onError=continue = return nil", + groupResource: "pods", + item: unstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name", + "annotations": { + "hook.backup.ark.heptio.com/container": "c", + "hook.backup.ark.heptio.com/command": "/bin/ls", + "hook.backup.ark.heptio.com/on-error": "Continue" + } + } + }`), + expectedPodHook: &v1.ExecHook{ + Container: "c", + Command: []string{"/bin/ls"}, + OnError: v1.HookErrorModeContinue, + }, + expectedPodHookError: errors.New("pod hook error"), + expectedError: nil, + }, + { + name: "pod, spec, onError=fail = don't run other hooks", + groupResource: "pods", + item: unstructuredOrDie(` + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "namespace": "ns", + "name": "name" + } + }`), + hooks: []resourceHook{ + { + name: "hook1", + hooks: []v1.BackupResourceHook{ + { + Exec: &v1.ExecHook{ + Container: "1a", + Command: []string{"1a"}, + OnError: v1.HookErrorModeContinue, + }, + }, + { + Exec: &v1.ExecHook{ + Container: "1b", + Command: []string{"1b"}, + }, + }, + }, + }, + { + name: "hook2", + hooks: []v1.BackupResourceHook{ + { + Exec: &v1.ExecHook{ + Container: "2", + Command: []string{"2"}, + OnError: v1.HookErrorModeFail, + }, + }, + }, + }, + { + name: "hook3", + hooks: []v1.BackupResourceHook{ + { + Exec: &v1.ExecHook{ + Container: "3", + Command: []string{"3"}, + }, + }, + }, + }, + }, + hookErrorsByContainer: map[string]error{ + "1a": errors.New("1a error, but continue"), + "2": errors.New("2 error, fail"), + }, + expectedError: errors.New("2 error, fail"), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + podCommandExecutor := &mockPodCommandExecutor{} + defer podCommandExecutor.AssertExpectations(t) + + h := &defaultItemHookHandler{ + podCommandExecutor: podCommandExecutor, + } + + if test.expectedPodHook != nil { + podCommandExecutor.On("executePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", "", 
test.expectedPodHook).Return(test.expectedPodHookError) + } else { + hookLoop: + for _, resourceHook := range test.hooks { + for _, hook := range resourceHook.hooks { + hookError := test.hookErrorsByContainer[hook.Exec.Container] + podCommandExecutor.On("executePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", resourceHook.name, hook.Exec).Return(hookError) + if hookError != nil && hook.Exec.OnError == v1.HookErrorModeFail { + break hookLoop + } + } + } + } + + groupResource := schema.ParseGroupResource(test.groupResource) + err := h.handleHooks(arktest.NewLogger(), groupResource, test.item, test.hooks) + + if test.expectedError != nil { + assert.EqualError(t, err, test.expectedError.Error()) + return + } + + require.NoError(t, err) + }) + } +} + +func TestGetPodExecHookFromAnnotations(t *testing.T) { + tests := []struct { + name string + annotations map[string]string + expectedHook *v1.ExecHook + }{ + { + name: "missing command annotation", + expectedHook: nil, + }, + { + name: "malformed command json array", + annotations: map[string]string{ + podBackupHookCommandAnnotationKey: "[blarg", + }, + expectedHook: &v1.ExecHook{ + Command: []string{"[blarg"}, + }, + }, + { + name: "valid command json array", + annotations: map[string]string{ + podBackupHookCommandAnnotationKey: `["a","b","c"]`, + }, + expectedHook: &v1.ExecHook{ + Command: []string{"a", "b", "c"}, + }, + }, + { + name: "command as a string", + annotations: map[string]string{ + podBackupHookCommandAnnotationKey: "/usr/bin/foo", + }, + expectedHook: &v1.ExecHook{ + Command: []string{"/usr/bin/foo"}, + }, + }, + { + name: "hook mode set to continue", + annotations: map[string]string{ + podBackupHookCommandAnnotationKey: "/usr/bin/foo", + podBackupHookOnErrorAnnotationKey: string(v1.HookErrorModeContinue), + }, + expectedHook: &v1.ExecHook{ + Command: []string{"/usr/bin/foo"}, + OnError: v1.HookErrorModeContinue, + }, + }, + { + name: "hook mode set to fail", + annotations: map[string]string{ + podBackupHookCommandAnnotationKey: "/usr/bin/foo", + podBackupHookOnErrorAnnotationKey: string(v1.HookErrorModeFail), + }, + expectedHook: &v1.ExecHook{ + Command: []string{"/usr/bin/foo"}, + OnError: v1.HookErrorModeFail, + }, + }, + { + name: "use the specified timeout", + annotations: map[string]string{ + podBackupHookCommandAnnotationKey: "/usr/bin/foo", + podBackupHookTimeoutAnnotationKey: "5m3s", + }, + expectedHook: &v1.ExecHook{ + Command: []string{"/usr/bin/foo"}, + Timeout: metav1.Duration{Duration: 5*time.Minute + 3*time.Second}, + }, + }, + { + name: "invalid timeout is ignored", + annotations: map[string]string{ + podBackupHookCommandAnnotationKey: "/usr/bin/foo", + podBackupHookTimeoutAnnotationKey: "invalid", + }, + expectedHook: &v1.ExecHook{ + Command: []string{"/usr/bin/foo"}, + }, + }, + { + name: "use the specified container", + annotations: map[string]string{ + podBackupHookContainerAnnotationKey: "some-container", + podBackupHookCommandAnnotationKey: "/usr/bin/foo", + }, + expectedHook: &v1.ExecHook{ + Container: "some-container", + Command: []string{"/usr/bin/foo"}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + hook := getPodExecHookFromAnnotations(test.annotations) + assert.Equal(t, test.expectedHook, hook) + }) + } +} + +func TestResourceHookApplicableTo(t *testing.T) { + tests := []struct { + name string + includedNamespaces []string + excludedNamespaces []string + includedResources []string + excludedResources []string + labelSelector string + 
namespace string + resource schema.GroupResource + labels labels.Set + expected bool + }{ + { + name: "allow anything", + namespace: "foo", + resource: schema.GroupResource{Group: "foo", Resource: "bar"}, + expected: true, + }, + { + name: "namespace in included list", + includedNamespaces: []string{"a", "b"}, + excludedNamespaces: []string{"c", "d"}, + namespace: "b", + expected: true, + }, + { + name: "namespace not in included list", + includedNamespaces: []string{"a", "b"}, + namespace: "c", + expected: false, + }, + { + name: "namespace excluded", + excludedNamespaces: []string{"a", "b"}, + namespace: "a", + expected: false, + }, + { + name: "resource in included list", + includedResources: []string{"foo.a", "bar.b"}, + excludedResources: []string{"baz.c"}, + resource: schema.GroupResource{Group: "a", Resource: "foo"}, + expected: true, + }, + { + name: "resource not in included list", + includedResources: []string{"foo.a", "bar.b"}, + resource: schema.GroupResource{Group: "c", Resource: "baz"}, + expected: false, + }, + { + name: "resource excluded", + excludedResources: []string{"foo.a", "bar.b"}, + resource: schema.GroupResource{Group: "b", Resource: "bar"}, + expected: false, + }, + { + name: "label selector matches", + labelSelector: "a=b", + labels: labels.Set{"a": "b"}, + expected: true, + }, + { + name: "label selector doesn't match", + labelSelector: "a=b", + labels: labels.Set{"a": "c"}, + expected: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + h := resourceHook{ + namespaces: collections.NewIncludesExcludes().Includes(test.includedNamespaces...).Excludes(test.excludedNamespaces...), + resources: collections.NewIncludesExcludes().Includes(test.includedResources...).Excludes(test.excludedResources...), + } + if test.labelSelector != "" { + selector, err := labels.Parse(test.labelSelector) + require.NoError(t, err) + h.labelSelector = selector + } + + result := h.applicableTo(test.resource, test.namespace, test.labels) + assert.Equal(t, test.expected, result) + }) + } +} diff --git a/pkg/backup/mocks_test.go b/pkg/backup/mocks_test.go new file mode 100644 index 000000000..42b33e95a --- /dev/null +++ b/pkg/backup/mocks_test.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup diff --git a/pkg/backup/pod_command_executor.go b/pkg/backup/pod_command_executor.go new file mode 100644 index 000000000..5cccdbced --- /dev/null +++ b/pkg/backup/pod_command_executor.go @@ -0,0 +1,225 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + "bytes" + "net/url" + "time" + + api "github.com/heptio/ark/pkg/apis/ark/v1" + "github.com/heptio/ark/pkg/util/collections" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" + kscheme "k8s.io/client-go/kubernetes/scheme" + kapiv1 "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" +) + +// podCommandExecutor is capable of executing a command in a container in a pod. +type podCommandExecutor interface { + // executePodCommand executes a command in a container in a pod. If the command takes longer than + // the specified timeout, an error is returned. + executePodCommand(log *logrus.Entry, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error +} + +type poster interface { + Post() *rest.Request +} + +type defaultPodCommandExecutor struct { + restClientConfig *rest.Config + restClient poster + + streamExecutorFactory streamExecutorFactory +} + +// NewPodCommandExecutor creates a new podCommandExecutor. +func NewPodCommandExecutor(restClientConfig *rest.Config, restClient poster) podCommandExecutor { + return &defaultPodCommandExecutor{ + restClientConfig: restClientConfig, + restClient: restClient, + + streamExecutorFactory: &defaultStreamExecutorFactory{}, + } +} + +// executePodCommand uses the pod exec API to execute a command in a container in a pod. If the +// command takes longer than the specified timeout, an error is returned (NOTE: it is not currently +// possible to ensure the command is terminated when the timeout occurs, so it may continue to run +// in the background). +func (e *defaultPodCommandExecutor) executePodCommand(log *logrus.Entry, item map[string]interface{}, namespace, name, hookName string, hook *api.ExecHook) error { + if item == nil { + return errors.New("item is required") + } + if namespace == "" { + return errors.New("namespace is required") + } + if name == "" { + return errors.New("name is required") + } + if hookName == "" { + return errors.New("hookName is required") + } + if hook == nil { + return errors.New("hook is required") + } + + if hook.Container == "" { + if err := setDefaultHookContainer(item, hook); err != nil { + return err + } + } else if err := ensureContainerExists(item, hook.Container); err != nil { + return err + } + + if len(hook.Command) == 0 { + return errors.New("command is required") + } + + switch hook.OnError { + case api.HookErrorModeFail, api.HookErrorModeContinue: + // use the specified value + default: + // default to fail + hook.OnError = api.HookErrorModeFail + } + + if hook.Timeout.Duration == 0 { + hook.Timeout.Duration = defaultHookTimeout + } + + hookLog := log.WithFields( + logrus.Fields{ + "hookName": hookName, + "hookContainer": hook.Container, + "hookCommand": hook.Command, + "hookOnError": hook.OnError, + "hookTimeout": hook.Timeout, + }, + ) + hookLog.Info("running exec hook") + + req := e.restClient.Post(). + Resource("pods"). + Namespace(namespace). + Name(name). 
+ SubResource("exec") + + req.VersionedParams(&kapiv1.PodExecOptions{ + Container: hook.Container, + Command: hook.Command, + Stdout: true, + Stderr: true, + }, kscheme.ParameterCodec) + + executor, err := e.streamExecutorFactory.NewExecutor(e.restClientConfig, "POST", req.URL()) + if err != nil { + return err + } + + var stdout, stderr bytes.Buffer + + streamOptions := remotecommand.StreamOptions{ + SupportedProtocols: remotecommandconsts.SupportedStreamingProtocols, + Stdout: &stdout, + Stderr: &stderr, + } + + errCh := make(chan error) + + go func() { + err = executor.Stream(streamOptions) + errCh <- err + }() + + var timeoutCh <-chan time.Time + if hook.Timeout.Duration > 0 { + timer := time.NewTimer(hook.Timeout.Duration) + defer timer.Stop() + timeoutCh = timer.C + } + + select { + case err = <-errCh: + case <-timeoutCh: + return errors.Errorf("timed out after %v", hook.Timeout.Duration) + } + + hookLog.Infof("stdout: %s", stdout.String()) + hookLog.Infof("stderr: %s", stderr.String()) + + return err +} + +func ensureContainerExists(pod map[string]interface{}, container string) error { + containers, err := collections.GetSlice(pod, "spec.containers") + if err != nil { + return err + } + for _, obj := range containers { + c, ok := obj.(map[string]interface{}) + if !ok { + return errors.Errorf("unexpected type for container %T", obj) + } + name, ok := c["name"].(string) + if !ok { + return errors.Errorf("unexpected type for container name %T", c["name"]) + } + if name == container { + return nil + } + } + + return errors.Errorf("no such container: %q", container) +} + +func setDefaultHookContainer(pod map[string]interface{}, hook *api.ExecHook) error { + containers, err := collections.GetSlice(pod, "spec.containers") + if err != nil { + return err + } + + if len(containers) < 1 { + return errors.New("need at least 1 container") + } + + container, ok := containers[0].(map[string]interface{}) + if !ok { + return errors.Errorf("unexpected type for container %T", pod) + } + + name, ok := container["name"].(string) + if !ok { + return errors.Errorf("unexpected type for container name %T", container["name"]) + } + hook.Container = name + + return nil +} + +type streamExecutorFactory interface { + NewExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.StreamExecutor, error) +} + +type defaultStreamExecutorFactory struct{} + +func (f *defaultStreamExecutorFactory) NewExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.StreamExecutor, error) { + return remotecommand.NewExecutor(config, method, url) +} diff --git a/pkg/backup/pod_command_executor_test.go b/pkg/backup/pod_command_executor_test.go new file mode 100644 index 000000000..f5ee7fcda --- /dev/null +++ b/pkg/backup/pod_command_executor_test.go @@ -0,0 +1,278 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package backup + +import ( + "bytes" + "fmt" + "net/url" + "strings" + "testing" + "time" + + "github.com/heptio/ark/pkg/apis/ark/v1" + arktest "github.com/heptio/ark/pkg/util/test" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" +) + +func TestNewPodCommandExecutor(t *testing.T) { + restClientConfig := &rest.Config{Host: "foo"} + poster := &mockPoster{} + pce := NewPodCommandExecutor(restClientConfig, poster).(*defaultPodCommandExecutor) + assert.Equal(t, restClientConfig, pce.restClientConfig) + assert.Equal(t, poster, pce.restClient) + assert.Equal(t, &defaultStreamExecutorFactory{}, pce.streamExecutorFactory) +} + +func TestExecutePodCommandMissingInputs(t *testing.T) { + tests := []struct { + name string + item map[string]interface{} + podNamespace string + podName string + hookName string + hook *v1.ExecHook + }{ + { + name: "missing item", + }, + { + name: "missing pod namespace", + item: map[string]interface{}{}, + }, + { + name: "missing pod name", + item: map[string]interface{}{}, + podNamespace: "ns", + }, + { + name: "missing hookName", + item: map[string]interface{}{}, + podNamespace: "ns", + podName: "pod", + }, + { + name: "missing hook", + item: map[string]interface{}{}, + podNamespace: "ns", + podName: "pod", + hookName: "hook", + }, + { + name: "container not found", + item: unstructuredOrDie(`{"kind":"Pod","spec":{"containers":[{"name":"foo"}]}}`).Object, + podNamespace: "ns", + podName: "pod", + hookName: "hook", + hook: &v1.ExecHook{ + Container: "missing", + }, + }, + { + name: "command missing", + item: unstructuredOrDie(`{"kind":"Pod","spec":{"containers":[{"name":"foo"}]}}`).Object, + podNamespace: "ns", + podName: "pod", + hookName: "hook", + hook: &v1.ExecHook{ + Container: "foo", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + e := &defaultPodCommandExecutor{} + err := e.executePodCommand(arktest.NewLogger(), test.item, test.podNamespace, test.podName, test.hookName, test.hook) + assert.Error(t, err) + }) + } +} + +func TestExecutePodCommand(t *testing.T) { + tests := []struct { + name string + containerName string + expectedContainerName string + command []string + errorMode v1.HookErrorMode + expectedErrorMode v1.HookErrorMode + timeout time.Duration + expectedTimeout time.Duration + hookError error + expectedError string + }{ + { + name: "validate defaults", + command: []string{"some", "command"}, + expectedContainerName: "foo", + expectedErrorMode: v1.HookErrorModeFail, + expectedTimeout: 30 * time.Second, + }, + { + name: "use specified values", + command: []string{"some", "command"}, + containerName: "bar", + expectedContainerName: "bar", + errorMode: v1.HookErrorModeContinue, + expectedErrorMode: v1.HookErrorModeContinue, + timeout: 10 * time.Second, + expectedTimeout: 10 * time.Second, + }, + { + name: "hook error", + command: []string{"some", "command"}, + expectedContainerName: "foo", + expectedErrorMode: v1.HookErrorModeFail, + expectedTimeout: 30 * time.Second, + hookError: errors.New("hook error"), + expectedError: "hook error", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + hook := v1.ExecHook{ + Container: 
test.containerName, + Command: test.command, + OnError: test.errorMode, + Timeout: metav1.Duration{Duration: test.timeout}, + } + + pod, err := getAsMap(` +{ + "metadata": { + "namespace": "namespace", + "name": "name" + }, + "spec": { + "containers": [ + {"name": "foo"}, + {"name": "bar"} + ] + } +}`) + + require.NoError(t, err) + + clientConfig := &rest.Config{} + poster := &mockPoster{} + defer poster.AssertExpectations(t) + podCommandExecutor := NewPodCommandExecutor(clientConfig, poster).(*defaultPodCommandExecutor) + + streamExecutorFactory := &mockStreamExecutorFactory{} + defer streamExecutorFactory.AssertExpectations(t) + podCommandExecutor.streamExecutorFactory = streamExecutorFactory + + baseUrl, _ := url.Parse("https://some.server") + contentConfig := rest.ContentConfig{ + GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}, + } + postRequest := rest.NewRequest(nil, "POST", baseUrl, "/api/v1", contentConfig, rest.Serializers{}, nil, nil) + poster.On("Post").Return(postRequest) + + streamExecutor := &mockStreamExecutor{} + defer streamExecutor.AssertExpectations(t) + + expectedCommand := strings.Join(test.command, "&command=") + expectedURL, _ := url.Parse( + fmt.Sprintf("https://some.server/api/v1/namespaces/namespace/pods/name/exec?command=%s&container=%s&stderr=true&stdout=true", expectedCommand, test.expectedContainerName), + ) + streamExecutorFactory.On("NewExecutor", clientConfig, "POST", expectedURL).Return(streamExecutor, nil) + + var stdout, stderr bytes.Buffer + expectedStreamOptions := remotecommand.StreamOptions{ + SupportedProtocols: remotecommandconsts.SupportedStreamingProtocols, + Stdout: &stdout, + Stderr: &stderr, + } + streamExecutor.On("Stream", expectedStreamOptions).Return(test.hookError) + + err = podCommandExecutor.executePodCommand(arktest.NewLogger(), pod, "namespace", "name", "hookName", &hook) + if test.expectedError != "" { + assert.EqualError(t, err, test.expectedError) + return + } + + require.NoError(t, err) + }) + } +} + +func TestEnsureContainerExists(t *testing.T) { + pod := map[string]interface{}{ + "spec": map[string]interface{}{ + "containers": []interface{}{ + map[string]interface{}{ + "name": "foo", + }, + }, + }, + } + + err := ensureContainerExists(pod, "bar") + assert.EqualError(t, err, `no such container: "bar"`) + + err = ensureContainerExists(pod, "foo") + assert.NoError(t, err) +} + +type mockStreamExecutorFactory struct { + mock.Mock +} + +func (f *mockStreamExecutorFactory) NewExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.StreamExecutor, error) { + args := f.Called(config, method, url) + return args.Get(0).(remotecommand.StreamExecutor), args.Error(1) +} + +type mockStreamExecutor struct { + mock.Mock + remotecommand.StreamExecutor +} + +func (e *mockStreamExecutor) Stream(options remotecommand.StreamOptions) error { + args := e.Called(options) + return args.Error(0) +} + +type mockPoster struct { + mock.Mock +} + +func (p *mockPoster) Post() *rest.Request { + args := p.Called() + return args.Get(0).(*rest.Request) +} + +type mockPodCommandExecutor struct { + mock.Mock +} + +func (e *mockPodCommandExecutor) executePodCommand(log *logrus.Entry, item map[string]interface{}, namespace, name, hookName string, hook *v1.ExecHook) error { + args := e.Called(log, item, namespace, name, hookName, hook) + return args.Error(0) +} diff --git a/pkg/backup/resource_backupper.go b/pkg/backup/resource_backupper.go new file mode 100644 index 000000000..28d8b293d --- /dev/null +++ 
b/pkg/backup/resource_backupper.go @@ -0,0 +1,252 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + api "github.com/heptio/ark/pkg/apis/ark/v1" + "github.com/heptio/ark/pkg/client" + "github.com/heptio/ark/pkg/discovery" + "github.com/heptio/ark/pkg/util/collections" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + kuberrs "k8s.io/apimachinery/pkg/util/errors" +) + +type resourceBackupperFactory interface { + newResourceBackupper( + log *logrus.Entry, + backup *api.Backup, + namespaces *collections.IncludesExcludes, + resources *collections.IncludesExcludes, + labelSelector string, + dynamicFactory client.DynamicFactory, + discoveryHelper discovery.Helper, + backedUpItems map[itemKey]struct{}, + cohabitatingResources map[string]*cohabitatingResource, + actions map[schema.GroupResource]Action, + podCommandExecutor podCommandExecutor, + tarWriter tarWriter, + resourceHooks []resourceHook, + ) resourceBackupper +} + +type defaultResourceBackupperFactory struct{} + +func (f *defaultResourceBackupperFactory) newResourceBackupper( + log *logrus.Entry, + backup *api.Backup, + namespaces *collections.IncludesExcludes, + resources *collections.IncludesExcludes, + labelSelector string, + dynamicFactory client.DynamicFactory, + discoveryHelper discovery.Helper, + backedUpItems map[itemKey]struct{}, + cohabitatingResources map[string]*cohabitatingResource, + actions map[schema.GroupResource]Action, + podCommandExecutor podCommandExecutor, + tarWriter tarWriter, + resourceHooks []resourceHook, +) resourceBackupper { + return &defaultResourceBackupper{ + log: log, + backup: backup, + namespaces: namespaces, + resources: resources, + labelSelector: labelSelector, + dynamicFactory: dynamicFactory, + discoveryHelper: discoveryHelper, + backedUpItems: backedUpItems, + actions: actions, + cohabitatingResources: cohabitatingResources, + podCommandExecutor: podCommandExecutor, + tarWriter: tarWriter, + resourceHooks: resourceHooks, + + itemBackupperFactory: &defaultItemBackupperFactory{}, + } +} + +type resourceBackupper interface { + backupResource(group *metav1.APIResourceList, resource metav1.APIResource) error +} + +type defaultResourceBackupper struct { + log *logrus.Entry + backup *api.Backup + namespaces *collections.IncludesExcludes + resources *collections.IncludesExcludes + labelSelector string + dynamicFactory client.DynamicFactory + discoveryHelper discovery.Helper + backedUpItems map[itemKey]struct{} + cohabitatingResources map[string]*cohabitatingResource + actions map[schema.GroupResource]Action + podCommandExecutor podCommandExecutor + tarWriter tarWriter + resourceHooks []resourceHook + + itemBackupperFactory itemBackupperFactory +} + +// backupResource backs up all the objects for a given group-version-resource. 
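+// The resource is skipped entirely if it is excluded by the backup spec's resource includes/excludes, +// if it is cluster-scoped and cluster-scoped resources are not being included in this backup, or if it +// is a cohabitating resource (e.g. deployments in both extensions and apps) that has already been +// processed under another API group.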
+func (rb *defaultResourceBackupper) backupResource( + group *metav1.APIResourceList, + resource metav1.APIResource, +) error { + var errs []error + + gv, err := schema.ParseGroupVersion(group.GroupVersion) + if err != nil { + return errors.Wrapf(err, "error parsing GroupVersion %s", group.GroupVersion) + } + gr := schema.GroupResource{Group: gv.Group, Resource: resource.Name} + grString := gr.String() + + log := rb.log.WithField("groupResource", grString) + + switch { + case rb.backup.Spec.IncludeClusterResources == nil: + // when IncludeClusterResources == nil (auto), only directly + // back up cluster-scoped resources if we're doing a full-cluster + // (all namespaces) backup. Note that in the case of a subset of + // namespaces being backed up, some related cluster-scoped resources + // may still be backed up if triggered by a custom action (e.g. PVC->PV). + if !resource.Namespaced && !rb.namespaces.IncludeEverything() { + log.Info("Skipping resource because it's cluster-scoped and only specific namespaces are included in the backup") + return nil + } + case *rb.backup.Spec.IncludeClusterResources == false: + if !resource.Namespaced { + log.Info("Skipping resource because it's cluster-scoped") + return nil + } + case *rb.backup.Spec.IncludeClusterResources == true: + // include the resource, no action required + } + + if !rb.resources.ShouldInclude(grString) { + log.Infof("Resource is excluded") + return nil + } + + if cohabitator, found := rb.cohabitatingResources[resource.Name]; found { + if cohabitator.seen { + log.WithFields( + logrus.Fields{ + "cohabitatingResource1": cohabitator.groupResource1.String(), + "cohabitatingResource2": cohabitator.groupResource2.String(), + }, + ).Infof("Skipping resource because it cohabitates and we've already processed it") + return nil + } + cohabitator.seen = true + } + + itemBackupper := rb.itemBackupperFactory.newItemBackupper( + rb.backup, + rb.namespaces, + rb.resources, + rb.backedUpItems, + rb.actions, + rb.podCommandExecutor, + rb.tarWriter, + rb.resourceHooks, + rb.dynamicFactory, + rb.discoveryHelper, + ) + + var namespacesToList []string + if resource.Namespaced { + namespacesToList = getNamespacesToList(rb.namespaces) + } else { + namespacesToList = []string{""} + } + for _, namespace := range namespacesToList { + resourceClient, err := rb.dynamicFactory.ClientForGroupVersionResource(gv, resource, namespace) + if err != nil { + return err + } + + unstructuredList, err := resourceClient.List(metav1.ListOptions{LabelSelector: rb.labelSelector}) + if err != nil { + return errors.WithStack(err) + } + + // do the backup + items, err := meta.ExtractList(unstructuredList) + if err != nil { + return errors.WithStack(err) + } + + for _, item := range items { + unstructured, ok := item.(runtime.Unstructured) + if !ok { + errs = append(errs, errors.Errorf("unexpected type %T", item)) + continue + } + + if err := itemBackupper.backupItem(log, unstructured, gr); err != nil { + errs = append(errs, err) + } + } + } + + return kuberrs.NewAggregate(errs) +} + +// getNamespacesToList examines ie and resolves the includes and excludes to a full list of +// namespaces to list. If ie is nil or it includes *, the result is just "" (list across all +// namespaces). Otherwise, the result is a list of every included namespace minus all excluded ones. 
+func getNamespacesToList(ie *collections.IncludesExcludes) []string { + if ie == nil { + return []string{""} + } + + if ie.ShouldInclude("*") { + // "" means all namespaces + return []string{""} + } + + var list []string + for _, i := range ie.GetIncludes() { + if ie.ShouldInclude(i) { + list = append(list, i) + } + } + + return list +} + +type cohabitatingResource struct { + resource string + groupResource1 schema.GroupResource + groupResource2 schema.GroupResource + seen bool +} + +func newCohabitatingResource(resource, group1, group2 string) *cohabitatingResource { + return &cohabitatingResource{ + resource: resource, + groupResource1: schema.GroupResource{Group: group1, Resource: resource}, + groupResource2: schema.GroupResource{Group: group2, Resource: resource}, + seen: false, + } +} diff --git a/pkg/backup/resource_backupper_test.go b/pkg/backup/resource_backupper_test.go new file mode 100644 index 000000000..78b7828df --- /dev/null +++ b/pkg/backup/resource_backupper_test.go @@ -0,0 +1,744 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package backup + +import ( + "testing" + + "github.com/heptio/ark/pkg/apis/ark/v1" + "github.com/heptio/ark/pkg/client" + "github.com/heptio/ark/pkg/discovery" + "github.com/heptio/ark/pkg/util/collections" + arktest "github.com/heptio/ark/pkg/util/test" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func TestBackupResource(t *testing.T) { + var ( + trueVal = true + falseVal = false + truePointer = &trueVal + falsePointer = &falseVal + ) + + tests := []struct { + name string + namespaces *collections.IncludesExcludes + resources *collections.IncludesExcludes + expectSkip bool + expectedListedNamespaces []string + apiGroup *metav1.APIResourceList + apiResource metav1.APIResource + groupVersion schema.GroupVersion + groupResource schema.GroupResource + listResponses [][]*unstructured.Unstructured + includeClusterResources *bool + }{ + { + name: "resource not included", + apiGroup: v1Group, + apiResource: podsResource, + resources: collections.NewIncludesExcludes().Excludes("pods"), + expectSkip: true, + }, + { + name: "list all namespaces", + namespaces: collections.NewIncludesExcludes(), + resources: collections.NewIncludesExcludes(), + expectedListedNamespaces: []string{""}, + apiGroup: v1Group, + apiResource: podsResource, + groupVersion: schema.GroupVersion{Group: "", Version: "v1"}, + groupResource: schema.GroupResource{Group: "", Resource: "pods"}, + listResponses: [][]*unstructured.Unstructured{ + { + unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname1"}}`), + unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname2"}}`), + }, + }, + }, + { + name: "list selected namespaces", + namespaces: collections.NewIncludesExcludes().Includes("a", "b"), + 
resources: collections.NewIncludesExcludes(), + expectedListedNamespaces: []string{"a", "b"}, + apiGroup: v1Group, + apiResource: podsResource, + groupVersion: schema.GroupVersion{Group: "", Version: "v1"}, + groupResource: schema.GroupResource{Group: "", Resource: "pods"}, + listResponses: [][]*unstructured.Unstructured{ + { + unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname1"}}`), + unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname2"}}`), + }, + { + unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname3"}}`), + unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname4"}}`), + }, + }, + }, + { + name: "list all namespaces - cluster scoped", + namespaces: collections.NewIncludesExcludes(), + resources: collections.NewIncludesExcludes(), + expectedListedNamespaces: []string{""}, + apiGroup: certificatesGroup, + apiResource: certificateSigningRequestsResource, + groupVersion: schema.GroupVersion{Group: "certificates.k8s.io", Version: "v1beta1"}, + groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"}, + listResponses: [][]*unstructured.Unstructured{ + { + unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`), + unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`), + }, + }, + }, + { + name: "should include cluster-scoped resource if backing up subset of namespaces and --include-cluster-resources=true", + namespaces: collections.NewIncludesExcludes().Includes("ns-1"), + resources: collections.NewIncludesExcludes(), + includeClusterResources: truePointer, + expectedListedNamespaces: []string{""}, + apiGroup: certificatesGroup, + apiResource: certificateSigningRequestsResource, + groupVersion: schema.GroupVersion{Group: "certificates.k8s.io", Version: "v1beta1"}, + groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"}, + listResponses: [][]*unstructured.Unstructured{ + { + unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`), + unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`), + }, + }, + }, + { + name: "should not include cluster-scoped resource if backing up subset of namespaces and --include-cluster-resources=false", + namespaces: collections.NewIncludesExcludes().Includes("ns-1"), + resources: collections.NewIncludesExcludes(), + includeClusterResources: falsePointer, + apiGroup: certificatesGroup, + apiResource: certificateSigningRequestsResource, + groupVersion: schema.GroupVersion{Group: "certificates.k8s.io", Version: "v1beta1"}, + groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"}, + expectSkip: true, + }, + { + name: "should not include cluster-scoped resource if backing up subset of namespaces and --include-cluster-resources=nil", + namespaces: collections.NewIncludesExcludes().Includes("ns-1"), + resources: collections.NewIncludesExcludes(), + includeClusterResources: nil, + apiGroup: certificatesGroup, + apiResource: certificateSigningRequestsResource, + groupVersion: schema.GroupVersion{Group: "certificates.k8s.io", Version: "v1beta1"}, + groupResource: 
schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"}, + expectSkip: true, + }, + { + name: "should include cluster-scoped resource if backing up all namespaces and --include-cluster-resources=true", + namespaces: collections.NewIncludesExcludes(), + resources: collections.NewIncludesExcludes(), + includeClusterResources: truePointer, + expectedListedNamespaces: []string{""}, + apiGroup: certificatesGroup, + apiResource: certificateSigningRequestsResource, + groupVersion: schema.GroupVersion{Group: "certificates.k8s.io", Version: "v1beta1"}, + groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"}, + listResponses: [][]*unstructured.Unstructured{ + { + unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`), + unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`), + }, + }, + }, + { + name: "should not include cluster-scoped resource if backing up all namespaces and --include-cluster-resources=false", + namespaces: collections.NewIncludesExcludes(), + resources: collections.NewIncludesExcludes(), + includeClusterResources: falsePointer, + apiGroup: certificatesGroup, + apiResource: certificateSigningRequestsResource, + groupVersion: schema.GroupVersion{Group: "certificates.k8s.io", Version: "v1beta1"}, + groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"}, + expectSkip: true, + }, + { + name: "should include cluster-scoped resource if backing up all namespaces and --include-cluster-resources=nil", + namespaces: collections.NewIncludesExcludes(), + resources: collections.NewIncludesExcludes(), + includeClusterResources: nil, + expectedListedNamespaces: []string{""}, + apiGroup: certificatesGroup, + apiResource: certificateSigningRequestsResource, + groupVersion: schema.GroupVersion{Group: "certificates.k8s.io", Version: "v1beta1"}, + groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"}, + listResponses: [][]*unstructured.Unstructured{ + { + unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`), + unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`), + }, + }, + }, + } + + for _, test := range tests { + backup := &v1.Backup{ + Spec: v1.BackupSpec{ + IncludeClusterResources: test.includeClusterResources, + }, + } + + labelSelector := "foo=bar" + + dynamicFactory := &arktest.FakeDynamicFactory{} + defer dynamicFactory.AssertExpectations(t) + + discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil) + + backedUpItems := map[itemKey]struct{}{ + {resource: "foo", namespace: "ns", name: "name"}: struct{}{}, + } + + cohabitatingResources := map[string]*cohabitatingResource{ + "deployments": newCohabitatingResource("deployments", "extensions", "apps"), + "networkpolicies": newCohabitatingResource("networkpolicies", "extensions", "networking.k8s.io"), + } + + actions := map[schema.GroupResource]Action{ + {Group: "", Resource: "pods"}: &fakeAction{}, + } + + resourceHooks := []resourceHook{ + {name: "myhook"}, + } + + podCommandExecutor := &mockPodCommandExecutor{} + defer podCommandExecutor.AssertExpectations(t) + + tarWriter := &fakeTarWriter{} + + t.Run(test.name, func(t *testing.T) { + rb := 
(&defaultResourceBackupperFactory{}).newResourceBackupper( + arktest.NewLogger(), + backup, + test.namespaces, + test.resources, + labelSelector, + dynamicFactory, + discoveryHelper, + backedUpItems, + cohabitatingResources, + actions, + podCommandExecutor, + tarWriter, + resourceHooks, + ).(*defaultResourceBackupper) + + itemBackupperFactory := &mockItemBackupperFactory{} + defer itemBackupperFactory.AssertExpectations(t) + rb.itemBackupperFactory = itemBackupperFactory + + if !test.expectSkip { + itemBackupper := &mockItemBackupper{} + defer itemBackupper.AssertExpectations(t) + + itemBackupperFactory.On("newItemBackupper", + backup, + test.namespaces, + test.resources, + backedUpItems, + actions, + podCommandExecutor, + tarWriter, + resourceHooks, + dynamicFactory, + discoveryHelper, + ).Return(itemBackupper) + + for i, namespace := range test.expectedListedNamespaces { + client := &arktest.FakeDynamicClient{} + defer client.AssertExpectations(t) + + dynamicFactory.On("ClientForGroupVersionResource", test.groupVersion, test.apiResource, namespace).Return(client, nil) + + list := &unstructured.UnstructuredList{ + Items: []unstructured.Unstructured{}, + } + for _, item := range test.listResponses[i] { + list.Items = append(list.Items, *item) + itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), item, test.groupResource).Return(nil) + } + client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(list, nil) + + } + } + err := rb.backupResource(test.apiGroup, test.apiResource) + require.NoError(t, err) + }) + } +} + +func TestBackupResourceCohabitation(t *testing.T) { + tests := []struct { + name string + apiResource metav1.APIResource + apiGroup1 *metav1.APIResourceList + groupVersion1 schema.GroupVersion + apiGroup2 *metav1.APIResourceList + groupVersion2 schema.GroupVersion + }{ + { + name: "deployments - extensions first", + apiResource: deploymentsResource, + apiGroup1: extensionsGroup, + groupVersion1: extensionsGroupVersion, + apiGroup2: appsGroup, + groupVersion2: appsGroupVersion, + }, + { + name: "deployments - apps first", + apiResource: deploymentsResource, + apiGroup1: appsGroup, + groupVersion1: appsGroupVersion, + apiGroup2: extensionsGroup, + groupVersion2: extensionsGroupVersion, + }, + { + name: "networkpolicies - extensions first", + apiResource: networkPoliciesResource, + apiGroup1: extensionsGroup, + groupVersion1: extensionsGroupVersion, + apiGroup2: networkingGroup, + groupVersion2: networkingGroupVersion, + }, + { + name: "networkpolicies - networking first", + apiResource: networkPoliciesResource, + apiGroup1: networkingGroup, + groupVersion1: networkingGroupVersion, + apiGroup2: extensionsGroup, + groupVersion2: extensionsGroupVersion, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + backup := &v1.Backup{} + + namespaces := collections.NewIncludesExcludes().Includes("*") + resources := collections.NewIncludesExcludes().Includes("*") + + labelSelector := "foo=bar" + + dynamicFactory := &arktest.FakeDynamicFactory{} + defer dynamicFactory.AssertExpectations(t) + + discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil) + + backedUpItems := map[itemKey]struct{}{ + {resource: "foo", namespace: "ns", name: "name"}: struct{}{}, + } + + cohabitatingResources := map[string]*cohabitatingResource{ + "deployments": newCohabitatingResource("deployments", "extensions", "apps"), + "networkpolicies": newCohabitatingResource("networkpolicies", "extensions", "networking.k8s.io"), + } + + actions := 
map[schema.GroupResource]Action{ + {Group: "", Resource: "pods"}: &fakeAction{}, + } + + resourceHooks := []resourceHook{ + {name: "myhook"}, + } + + podCommandExecutor := &mockPodCommandExecutor{} + defer podCommandExecutor.AssertExpectations(t) + + tarWriter := &fakeTarWriter{} + + rb := (&defaultResourceBackupperFactory{}).newResourceBackupper( + arktest.NewLogger(), + backup, + namespaces, + resources, + labelSelector, + dynamicFactory, + discoveryHelper, + backedUpItems, + cohabitatingResources, + actions, + podCommandExecutor, + tarWriter, + resourceHooks, + ).(*defaultResourceBackupper) + + itemBackupperFactory := &mockItemBackupperFactory{} + defer itemBackupperFactory.AssertExpectations(t) + rb.itemBackupperFactory = itemBackupperFactory + + itemBackupper := &mockItemBackupper{} + defer itemBackupper.AssertExpectations(t) + + itemBackupperFactory.On("newItemBackupper", + backup, + namespaces, + resources, + backedUpItems, + actions, + podCommandExecutor, + tarWriter, + resourceHooks, + dynamicFactory, + discoveryHelper, + ).Return(itemBackupper) + + client := &arktest.FakeDynamicClient{} + defer client.AssertExpectations(t) + + // STEP 1: make sure the initial backup goes through + dynamicFactory.On("ClientForGroupVersionResource", test.groupVersion1, test.apiResource, "").Return(client, nil) + client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(&unstructured.UnstructuredList{}, nil) + + // STEP 2: do the backup + err := rb.backupResource(test.apiGroup1, test.apiResource) + require.NoError(t, err) + + // STEP 3: try to back up the cohabitating resource + err = rb.backupResource(test.apiGroup2, test.apiResource) + require.NoError(t, err) + }) + } +} + +type mockItemBackupperFactory struct { + mock.Mock +} + +func (ibf *mockItemBackupperFactory) newItemBackupper( + backup *v1.Backup, + namespaces, resources *collections.IncludesExcludes, + backedUpItems map[itemKey]struct{}, + actions map[schema.GroupResource]Action, + podCommandExecutor podCommandExecutor, + tarWriter tarWriter, + resourceHooks []resourceHook, + dynamicFactory client.DynamicFactory, + discoveryHelper discovery.Helper, +) ItemBackupper { + args := ibf.Called( + backup, + namespaces, + resources, + backedUpItems, + actions, + podCommandExecutor, + tarWriter, + resourceHooks, + dynamicFactory, + discoveryHelper, + ) + return args.Get(0).(ItemBackupper) +} + +/* +func TestBackupResource2(t *testing.T) { + tests := []struct { + name string + resourceIncludesExcludes *collections.IncludesExcludes + resourceGroup string + resourceVersion string + resourceGV string + resourceName string + resourceNamespaced bool + namespaceIncludesExcludes *collections.IncludesExcludes + expectedListedNamespaces []string + lists []string + labelSelector string + actions map[string]Action + expectedActionIDs map[string][]string + deploymentsBackedUp bool + expectedDeploymentsBackedUp bool + networkPoliciesBackedUp bool + expectedNetworkPoliciesBackedUp bool + }{ + { + name: "should not include resource", + resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("pods"), + resourceGV: "v1", + resourceName: "secrets", + resourceNamespaced: true, + }, + { + name: "should skip deployments.extensions if we've seen deployments.apps", + resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), + resourceGV: "extensions/v1beta1", + resourceName: "deployments", + resourceNamespaced: true, + deploymentsBackedUp: true, + expectedDeploymentsBackedUp: true, + }, + { + name: "should skip 
deployments.apps if we've seen deployments.extensions", + resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), + resourceGV: "apps/v1beta1", + resourceName: "deployments", + resourceNamespaced: true, + deploymentsBackedUp: true, + expectedDeploymentsBackedUp: true, + }, + { + name: "should skip networkpolicies.extensions if we've seen networkpolicies.networking.k8s.io", + resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), + resourceGV: "extensions/v1beta1", + resourceName: "networkpolicies", + resourceNamespaced: true, + networkPoliciesBackedUp: true, + expectedNetworkPoliciesBackedUp: true, + }, + { + name: "should skip networkpolicies.networking.k8s.io if we've seen networkpolicies.extensions", + resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), + resourceGV: "networking.k8s.io/v1", + resourceName: "networkpolicies", + resourceNamespaced: true, + networkPoliciesBackedUp: true, + expectedNetworkPoliciesBackedUp: true, + }, + { + name: "list per namespace when not including *", + resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), + resourceGroup: "apps", + resourceVersion: "v1beta1", + resourceGV: "apps/v1beta1", + resourceName: "deployments", + resourceNamespaced: true, + namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("a", "b"), + expectedListedNamespaces: []string{"a", "b"}, + lists: []string{ + `{ + "apiVersion": "apps/v1beta1", + "kind": "DeploymentList", + "items": [ + { + "metadata": { + "namespace": "a", + "name": "1" + } + } + ] + }`, + `{ + "apiVersion": "apps/v1beta1v1", + "kind": "DeploymentList", + "items": [ + { + "metadata": { + "namespace": "b", + "name": "2" + } + } + ] + }`, + }, + expectedDeploymentsBackedUp: true, + }, + { + name: "list all namespaces when including *", + resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), + resourceGroup: "networking.k8s.io", + resourceVersion: "v1", + resourceGV: "networking.k8s.io/v1", + resourceName: "networkpolicies", + resourceNamespaced: true, + namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), + expectedListedNamespaces: []string{""}, + lists: []string{ + `{ + "apiVersion": "networking.k8s.io/v1", + "kind": "NetworkPolicyList", + "items": [ + { + "metadata": { + "namespace": "a", + "name": "1" + } + } + ] + }`, + }, + expectedNetworkPoliciesBackedUp: true, + }, + { + name: "list all namespaces when cluster-scoped, even with namespace includes", + resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), + resourceGroup: "certificates.k8s.io", + resourceVersion: "v1beta1", + resourceGV: "certificates.k8s.io/v1beta1", + resourceName: "certificatesigningrequests", + resourceNamespaced: false, + namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("a"), + expectedListedNamespaces: []string{""}, + labelSelector: "a=b", + lists: []string{ + `{ + "apiVersion": "certifiaces.k8s.io/v1beta1", + "kind": "CertificateSigningRequestList", + "items": [ + { + "metadata": { + "name": "1", + "labels": { + "a": "b" + } + } + } + ] + }`, + }, + }, + { + name: "use a custom action", + resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), + resourceGroup: "certificates.k8s.io", + resourceVersion: "v1beta1", + resourceGV: "certificates.k8s.io/v1beta1", + resourceName: "certificatesigningrequests", + resourceNamespaced: false, + namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("a"), + expectedListedNamespaces: 
[]string{""}, + labelSelector: "a=b", + lists: []string{ + `{ + "apiVersion": "certificates.k8s.io/v1beta1", + "kind": "CertificateSigningRequestList", + "items": [ + { + "metadata": { + "name": "1", + "labels": { + "a": "b" + } + } + } + ] +}`, + }, + actions: map[string]Action{ + "certificatesigningrequests": &fakeAction{}, + "other": &fakeAction{}, + }, + expectedActionIDs: map[string][]string{ + "certificatesigningrequests": {"1"}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var labelSelector *metav1.LabelSelector + if test.labelSelector != "" { + s, err := metav1.ParseToLabelSelector(test.labelSelector) + require.NoError(t, err) + labelSelector = s + } + + log, _ := testlogger.NewNullLogger() + + ctx := &backupContext{ + backup: &v1.Backup{ + Spec: v1.BackupSpec{ + LabelSelector: labelSelector, + }, + }, + resourceIncludesExcludes: test.resourceIncludesExcludes, + namespaceIncludesExcludes: test.namespaceIncludesExcludes, + deploymentsBackedUp: test.deploymentsBackedUp, + networkPoliciesBackedUp: test.networkPoliciesBackedUp, + logger: log, + } + + group := &metav1.APIResourceList{ + GroupVersion: test.resourceGV, + } + + resource := metav1.APIResource{Name: test.resourceName, Namespaced: test.resourceNamespaced} + + itemBackupper := &mockItemBackupper{} + + var actualActionIDs map[string][]string + + dynamicFactory := &arktest.FakeDynamicFactory{} + gvr := schema.GroupVersionResource{Group: test.resourceGroup, Version: test.resourceVersion} + gr := schema.GroupResource{Group: test.resourceGroup, Resource: test.resourceName} + for i, namespace := range test.expectedListedNamespaces { + obj := toRuntimeObject(t, test.lists[i]) + + client := &arktest.FakeDynamicClient{} + client.On("List", metav1.ListOptions{LabelSelector: test.labelSelector}).Return(obj, nil) + dynamicFactory.On("ClientForGroupVersionResource", gvr, resource, namespace).Return(client, nil) + + action := test.actions[test.resourceName] + + list, err := meta.ExtractList(obj) + require.NoError(t, err) + for i := range list { + item := list[i].(*unstructured.Unstructured) + itemBackupper.On("backupItem", ctx, item, gr).Return(nil) + if action != nil { + a, err := meta.Accessor(item) + require.NoError(t, err) + ns := a.GetNamespace() + name := a.GetName() + id := ns + if id != "" { + id += "/" + } + id += name + if actualActionIDs == nil { + actualActionIDs = make(map[string][]string) + } + actualActionIDs[test.resourceName] = append(actualActionIDs[test.resourceName], id) + } + } + } + + resources := map[schema.GroupVersionResource]schema.GroupVersionResource{ + schema.GroupVersionResource{Resource: "certificatesigningrequests"}: schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}, + schema.GroupVersionResource{Resource: "other"}: schema.GroupVersionResource{Group: "somegroup", Version: "someversion", Resource: "otherthings"}, + } + discoveryHelper := arktest.NewFakeDiscoveryHelper(false, resources) + + podCommandExecutor := &arktest.PodCommandExecutor{} + defer podCommandExecutor.AssertExpectations(t) + + kb, err := NewKubernetesBackupper(discoveryHelper, dynamicFactory, test.actions, podCommandExecutor) + require.NoError(t, err) + backupper := kb.(*kubernetesBackupper) + backupper.itemBackupper = itemBackupper + + err = backupper.backupResource(ctx, group, resource) + + assert.Equal(t, test.expectedDeploymentsBackedUp, ctx.deploymentsBackedUp) + assert.Equal(t, test.expectedNetworkPoliciesBackedUp, 
ctx.networkPoliciesBackedUp) + assert.Equal(t, test.expectedActionIDs, actualActionIDs) + }) + } +} +*/ diff --git a/pkg/backup/volume_snapshot_action.go b/pkg/backup/volume_snapshot_action.go index 203dd8a62..1d0403561 100644 --- a/pkg/backup/volume_snapshot_action.go +++ b/pkg/backup/volume_snapshot_action.go @@ -18,12 +18,14 @@ package backup import ( "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/clock" api "github.com/heptio/ark/pkg/apis/ark/v1" "github.com/heptio/ark/pkg/cloudprovider" - "github.com/heptio/ark/pkg/util/collections" kubeutil "github.com/heptio/ark/pkg/util/kube" ) @@ -38,8 +40,6 @@ type volumeSnapshotAction struct { clock clock.Clock } -var _ Action = &volumeSnapshotAction{} - func NewVolumeSnapshotAction(snapshotService cloudprovider.SnapshotService) (Action, error) { if snapshotService == nil { return nil, errors.New("snapshotService cannot be nil") @@ -54,53 +54,56 @@ func NewVolumeSnapshotAction(snapshotService cloudprovider.SnapshotService) (Act // Execute triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided // backup has volume snapshots enabled and the PV is of a compatible type. Also records cloud // disk type and IOPS (if applicable) to be able to restore to current state later. -func (a *volumeSnapshotAction) Execute(ctx *backupContext, volume map[string]interface{}, backupper itemBackupper) error { - var ( - backup = ctx.backup - backupName = kubeutil.NamespaceAndName(backup) - ) +func (a *volumeSnapshotAction) Execute(log *logrus.Entry, item runtime.Unstructured, backup *api.Backup) ([]ResourceIdentifier, error) { + var noAdditionalItems []ResourceIdentifier + + log.Info("Executing volumeSnapshotAction") if backup.Spec.SnapshotVolumes != nil && !*backup.Spec.SnapshotVolumes { - ctx.infof("Backup %q has volume snapshots disabled; skipping volume snapshot action.", backupName) - return nil + log.Info("Backup has volume snapshots disabled; skipping volume snapshot action.") + return noAdditionalItems, nil } - metadata := volume["metadata"].(map[string]interface{}) - name := metadata["name"].(string) + metadata, err := meta.Accessor(item) + if err != nil { + return noAdditionalItems, errors.WithStack(err) + } + + name := metadata.GetName() var pvFailureDomainZone string + labels := metadata.GetLabels() - if labelsMap, err := collections.GetMap(metadata, "labels"); err != nil { - ctx.infof("error getting labels on PersistentVolume %q for backup %q: %v", name, backupName, err) + if labels[zoneLabel] != "" { + pvFailureDomainZone = labels[zoneLabel] } else { - if labelsMap[zoneLabel] != nil { - pvFailureDomainZone = labelsMap[zoneLabel].(string) - } else { - ctx.infof("label %q is not present on PersistentVolume %q for backup %q.", zoneLabel, name, backupName) - } + log.Infof("label %q is not present on PersistentVolume", zoneLabel) } - volumeID, err := kubeutil.GetVolumeID(volume) + volumeID, err := kubeutil.GetVolumeID(item.UnstructuredContent()) // non-nil error means it's a supported PV source but volume ID can't be found if err != nil { - return errors.Wrapf(err, "error getting volume ID for backup %q, PersistentVolume %q", backupName, name) + return noAdditionalItems, errors.Wrapf(err, "error getting volume ID for PersistentVolume") } // no volumeID / nil error means unsupported PV source if volumeID == "" { - ctx.infof("Backup %q: PersistentVolume %q is not a supported volume type for snapshots, skipping.", 
backupName, name) - return nil + log.Info("PersistentVolume is not a supported volume type for snapshots, skipping.") + return noAdditionalItems, nil } - ctx.infof("Backup %q: snapshotting PersistentVolume %q, volume-id %q", backupName, name, volumeID) + log = log.WithField("volumeID", volumeID) + + log.Info("Snapshotting PersistentVolume") snapshotID, err := a.snapshotService.CreateSnapshot(volumeID, pvFailureDomainZone) if err != nil { - ctx.infof("error creating snapshot for backup %q, volume %q, volume-id %q: %v", backupName, name, volumeID, err) - return err + // log+error on purpose - log goes to the per-backup log file, error goes to the backup + log.WithError(err).Error("error creating snapshot") + return noAdditionalItems, errors.WithMessage(err, "error creating snapshot") } volumeType, iops, err := a.snapshotService.GetVolumeInfo(volumeID, pvFailureDomainZone) if err != nil { - ctx.infof("error getting volume info for backup %q, volume %q, volume-id %q: %v", backupName, name, volumeID, err) - return err + log.WithError(err).Error("error getting volume info") + return noAdditionalItems, errors.WithMessage(err, "error getting volume info") } if backup.Status.VolumeBackups == nil { @@ -114,5 +117,5 @@ func (a *volumeSnapshotAction) Execute(ctx *backupContext, volume map[string]int AvailabilityZone: pvFailureDomainZone, } - return nil + return noAdditionalItems, nil } diff --git a/pkg/backup/volume_snapshot_action_test.go b/pkg/backup/volume_snapshot_action_test.go index dfc9dd13e..7f4a5c710 100644 --- a/pkg/backup/volume_snapshot_action_test.go +++ b/pkg/backup/volume_snapshot_action_test.go @@ -21,14 +21,15 @@ import ( "testing" "time" - testlogger "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/clock" "github.com/heptio/ark/pkg/apis/ark/v1" - . "github.com/heptio/ark/pkg/util/test" + arktest "github.com/heptio/ark/pkg/util/test" ) func TestVolumeSnapshotAction(t *testing.T) { @@ -185,7 +186,7 @@ func TestVolumeSnapshotAction(t *testing.T) { }, } - snapshotService := &FakeSnapshotService{SnapshottableVolumes: test.volumeInfo} + snapshotService := &arktest.FakeSnapshotService{SnapshottableVolumes: test.volumeInfo} vsa, _ := NewVolumeSnapshotAction(snapshotService) action := vsa.(*volumeSnapshotAction) @@ -198,15 +199,9 @@ func TestVolumeSnapshotAction(t *testing.T) { t.Fatal(err) } - log, _ := testlogger.NewNullLogger() - - ctx := &backupContext{ - backup: backup, - logger: log, - } - // method under test - err = action.Execute(ctx, pv, nil) + additionalItems, err := action.Execute(arktest.NewLogger(), &unstructured.Unstructured{Object: pv}, backup) + assert.Len(t, additionalItems, 0) gotErr := err != nil diff --git a/pkg/client/dynamic.go b/pkg/client/dynamic.go index 4d2baef9a..428bb52dd 100644 --- a/pkg/client/dynamic.go +++ b/pkg/client/dynamic.go @@ -30,12 +30,9 @@ import ( // DynamicFactory contains methods for retrieving dynamic clients for GroupVersionResources and // GroupVersionKinds. type DynamicFactory interface { - // ClientForGroupVersionResource returns a Dynamic client for the given Group and Version - // (specified in gvr) and Resource (specified in resource) for the given namespace. 
- ClientForGroupVersionResource(gvr schema.GroupVersionResource, resource metav1.APIResource, namespace string) (Dynamic, error) - // ClientForGroupVersionKind returns a Dynamic client for the given Group and Version - // (specified in gvk) and Resource (specified in resource) for the given namespace. - ClientForGroupVersionKind(gvk schema.GroupVersionKind, resource metav1.APIResource, namespace string) (Dynamic, error) + // ClientForGroupVersionResource returns a Dynamic client for the given group/version + // and resource for the given namespace. + ClientForGroupVersionResource(gv schema.GroupVersion, resource metav1.APIResource, namespace string) (Dynamic, error) } // dynamicFactory implements DynamicFactory. @@ -43,17 +40,17 @@ type dynamicFactory struct { clientPool dynamic.ClientPool } -var _ DynamicFactory = &dynamicFactory{} - // NewDynamicFactory returns a new ClientPool-based dynamic factory. func NewDynamicFactory(clientPool dynamic.ClientPool) DynamicFactory { return &dynamicFactory{clientPool: clientPool} } -func (f *dynamicFactory) ClientForGroupVersionResource(gvr schema.GroupVersionResource, resource metav1.APIResource, namespace string) (Dynamic, error) { - dynamicClient, err := f.clientPool.ClientForGroupVersionResource(gvr) +func (f *dynamicFactory) ClientForGroupVersionResource(gv schema.GroupVersion, resource metav1.APIResource, namespace string) (Dynamic, error) { + // client-go doesn't actually use the kind when getting the dynamic client from the client pool; + // it only needs the group and version. + dynamicClient, err := f.clientPool.ClientForGroupVersionKind(gv.WithKind("")) if err != nil { - return nil, errors.Wrapf(err, "error getting client for GroupVersionResource %s", gvr) + return nil, errors.Wrapf(err, "error getting client for GroupVersion %s, Resource %s", gv.String(), resource.String()) } return &dynamicResourceClient{ @@ -61,27 +58,36 @@ func (f *dynamicFactory) ClientForGroupVersionRe }, nil } -func (f *dynamicFactory) ClientForGroupVersionKind(gvk schema.GroupVersionKind, resource metav1.APIResource, namespace string) (Dynamic, error) { - dynamicClient, err := f.clientPool.ClientForGroupVersionKind(gvk) - if err != nil { - return nil, errors.Wrapf(err, "error getting client for GroupVersionKind %s", gvk) - } + +// Creator creates an object. +type Creator interface { + // Create creates an object. + Create(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) +} - return &dynamicResourceClient{ - resourceClient: dynamicClient.Resource(&resource, namespace), - }, nil +// Lister lists objects. +type Lister interface { + // List lists all the objects of a given resource. + List(metav1.ListOptions) (runtime.Object, error) +} + +// Watcher watches objects. +type Watcher interface { + // Watch watches for changes to objects of a given resource. + Watch(metav1.ListOptions) (watch.Interface, error) +} + +// Getter gets an object. +type Getter interface { + // Get fetches an object by name. + Get(name string, opts metav1.GetOptions) (*unstructured.Unstructured, error) } // Dynamic contains client methods that Ark needs for backing up and restoring resources. type Dynamic interface { - // Create creates an object. - Create(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) - // List lists all the objects of a given resource. - List(metav1.ListOptions) (runtime.Object, error) - // Watch watches for changes to objects of a given resource. 
- Watch(metav1.ListOptions) (watch.Interface, error) - // Get fetches an object by name. - Get(name string, opts metav1.GetOptions) (*unstructured.Unstructured, error) + Creator + Lister + Watcher + Getter } // dynamicResourceClient implements Dynamic. diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index 07201e943..fa10e00b5 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -39,7 +39,9 @@ import ( "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" + kcorev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/pkg/api/v1" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" api "github.com/heptio/ark/pkg/apis/ark/v1" @@ -134,6 +136,7 @@ func getSortedLogLevels() []string { } type server struct { + kubeClientConfig *rest.Config kubeClient kubernetes.Interface arkClient clientset.Interface backupService cloudprovider.BackupService @@ -165,6 +168,7 @@ func newServer(kubeconfig, baseName string, logger *logrus.Logger) (*server, err ctx, cancelFunc := context.WithCancel(context.Background()) s := &server{ + kubeClientConfig: clientConfig, kubeClient: kubeClient, arkClient: arkClient, discoveryClient: arkClient.Discovery(), @@ -502,7 +506,7 @@ func (s *server) runControllers(config *api.Config) error { if config.RestoreOnlyMode { s.logger.Info("Restore only mode - not starting the backup, schedule or GC controllers") } else { - backupper, err := newBackupper(discoveryHelper, s.clientPool, s.backupService, s.snapshotService) + backupper, err := newBackupper(discoveryHelper, s.clientPool, s.backupService, s.snapshotService, s.kubeClientConfig, s.kubeClient.CoreV1()) cmd.CheckError(err) backupController := controller.NewBackupController( s.sharedInformerFactory.Ark().V1().Backups(), @@ -610,23 +614,27 @@ func newBackupper( clientPool dynamic.ClientPool, backupService cloudprovider.BackupService, snapshotService cloudprovider.SnapshotService, + kubeClientConfig *rest.Config, + kubeCoreV1Client kcorev1client.CoreV1Interface, ) (backup.Backupper, error) { actions := map[string]backup.Action{} + dynamicFactory := client.NewDynamicFactory(clientPool) if snapshotService != nil { action, err := backup.NewVolumeSnapshotAction(snapshotService) if err != nil { return nil, err } - actions["persistentvolumes"] = action + actions["persistentvolumeclaims"] = backup.NewBackupPVAction() } return backup.NewKubernetesBackupper( discoveryHelper, - client.NewDynamicFactory(clientPool), + dynamicFactory, actions, + backup.NewPodCommandExecutor(kubeClientConfig, kubeCoreV1Client.RESTClient()), ) } diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go index 58e22fb22..ff61016d5 100644 --- a/pkg/controller/backup_controller.go +++ b/pkg/controller/backup_controller.go @@ -229,16 +229,6 @@ func (controller *backupController) processBackup(key string) error { // set backup version backup.Status.Version = backupVersion - // included resources defaulting - if len(backup.Spec.IncludedResources) == 0 { - backup.Spec.IncludedResources = []string{"*"} - } - - // included namespace defaulting - if len(backup.Spec.IncludedNamespaces) == 0 { - backup.Spec.IncludedNamespaces = []string{"*"} - } - // calculate expiration if backup.Spec.TTL.Duration > 0 { backup.Status.Expiration = metav1.NewTime(controller.clock.Now().Add(backup.Spec.TTL.Duration)) diff --git a/pkg/controller/backup_controller_test.go b/pkg/controller/backup_controller_test.go index 5bdd228e8..ae45fe727 100644 --- 
a/pkg/controller/backup_controller_test.go +++ b/pkg/controller/backup_controller_test.go @@ -118,18 +118,16 @@ func TestProcessBackup(t *testing.T) { expectBackup: true, }, { - name: "if includednamespaces are specified, don't default to *", - key: "heptio-ark/backup1", - backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedNamespaces("ns-1"), - expectedIncludes: []string{"*"}, - expectBackup: true, + name: "if includednamespaces are specified, don't default to *", + key: "heptio-ark/backup1", + backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedNamespaces("ns-1"), + expectBackup: true, }, { - name: "ttl", - key: "heptio-ark/backup1", - backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithTTL(10 * time.Minute), - expectedIncludes: []string{"*"}, - expectBackup: true, + name: "ttl", + key: "heptio-ark/backup1", + backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithTTL(10 * time.Minute), + expectBackup: true, }, { name: "backup with SnapshotVolumes when allowSnapshots=false fails validation", @@ -138,12 +136,11 @@ func TestProcessBackup(t *testing.T) { expectBackup: false, }, { - name: "backup with SnapshotVolumes when allowSnapshots=true gets executed", - key: "heptio-ark/backup1", - backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithSnapshotVolumes(true), - allowSnapshots: true, - expectedIncludes: []string{"*"}, - expectBackup: true, + name: "backup with SnapshotVolumes when allowSnapshots=true gets executed", + key: "heptio-ark/backup1", + backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithSnapshotVolumes(true), + allowSnapshots: true, + expectBackup: true, }, } @@ -170,8 +167,6 @@ func TestProcessBackup(t *testing.T) { var expiration time.Time - var expectedNSes []string - if test.backup != nil { // add directly to the informer's store so the lister can function and so we don't have to // start the shared informers. @@ -187,14 +182,7 @@ func TestProcessBackup(t *testing.T) { backup := copy.(*v1.Backup) backup.Spec.IncludedResources = test.expectedIncludes backup.Spec.ExcludedResources = test.expectedExcludes - - if test.backup.Spec.IncludedNamespaces == nil { - expectedNSes = []string{"*"} - } else { - expectedNSes = test.backup.Spec.IncludedNamespaces - } - - backup.Spec.IncludedNamespaces = expectedNSes + backup.Spec.IncludedNamespaces = test.backup.Spec.IncludedNamespaces backup.Spec.SnapshotVolumes = test.backup.Spec.SnapshotVolumes backup.Status.Phase = v1.BackupPhaseInProgress backup.Status.Expiration.Time = expiration @@ -240,7 +228,7 @@ func TestProcessBackup(t *testing.T) { WithPhase(v1.BackupPhaseInProgress). WithIncludedResources(test.expectedIncludes...). WithExcludedResources(test.expectedExcludes...). - WithIncludedNamespaces(expectedNSes...). + WithIncludedNamespaces(test.backup.Spec.IncludedNamespaces...). WithTTL(test.backup.Spec.TTL.Duration). WithSnapshotVolumesPointer(test.backup.Spec.SnapshotVolumes). WithExpiration(expiration). @@ -256,7 +244,7 @@ func TestProcessBackup(t *testing.T) { WithPhase(v1.BackupPhaseCompleted). WithIncludedResources(test.expectedIncludes...). WithExcludedResources(test.expectedExcludes...). - WithIncludedNamespaces(expectedNSes...). + WithIncludedNamespaces(test.backup.Spec.IncludedNamespaces...). WithTTL(test.backup.Spec.TTL.Duration). WithSnapshotVolumesPointer(test.backup.Spec.SnapshotVolumes). WithExpiration(expiration). 
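To make the client refactor above concrete, here is a minimal usage sketch of the narrowed factory API, assuming the DynamicFactory, Dynamic, and Creator interfaces as modified in pkg/client/dynamic.go in this diff; the restoreConfigMap helper and its arguments are illustrative only and are not part of the change.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	arkclient "github.com/heptio/ark/pkg/client"
)

// restoreConfigMap shows a caller deriving the group/version from the object
// itself; the separate ClientForGroupVersionKind entry point is no longer needed.
func restoreConfigMap(factory arkclient.DynamicFactory, obj *unstructured.Unstructured, namespace string) error {
	resource := metav1.APIResource{Name: "configmaps", Namespaced: true}

	// Only the group/version matters to the underlying client pool.
	resourceClient, err := factory.ClientForGroupVersionResource(obj.GroupVersionKind().GroupVersion(), resource, namespace)
	if err != nil {
		return err
	}

	// Dynamic is now a composition of Creator, Lister, Watcher, and Getter, so
	// code that only creates objects can accept the narrower Creator interface.
	var creator arkclient.Creator = resourceClient
	_, err = creator.Create(obj)
	return err
}

Splitting Dynamic into the four single-method interfaces also lets tests fake only the capability they exercise rather than the whole client surface.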
diff --git a/pkg/controller/restore_controller.go b/pkg/controller/restore_controller.go index 7487f2b84..2d6802296 100644 --- a/pkg/controller/restore_controller.go +++ b/pkg/controller/restore_controller.go @@ -231,14 +231,6 @@ func (controller *restoreController) processRestore(key string) error { return err } - // defaulting - if len(restore.Spec.IncludedNamespaces) == 0 { - restore.Spec.IncludedNamespaces = []string{"*"} - } - if len(restore.Spec.IncludedResources) == 0 { - restore.Spec.IncludedResources = []string{"*"} - } - excludedResources := sets.NewString(restore.Spec.ExcludedResources...) for _, nonrestorable := range nonRestorableResources { if !excludedResources.Has(nonrestorable) { diff --git a/pkg/controller/restore_controller_test.go b/pkg/controller/restore_controller_test.go index d7068a491..352fbeaf8 100644 --- a/pkg/controller/restore_controller_test.go +++ b/pkg/controller/restore_controller_test.go @@ -175,7 +175,7 @@ func TestProcessRestore(t *testing.T) { restore: NewRestore("foo", "bar", "", "ns-1", "", api.RestorePhaseNew).Restore, expectedErr: false, expectedRestoreUpdates: []*api.Restore{ - NewRestore("foo", "bar", "", "ns-1", "*", api.RestorePhaseFailedValidation). + NewRestore("foo", "bar", "", "ns-1", "", api.RestorePhaseFailedValidation). WithValidationError("BackupName must be non-empty and correspond to the name of a backup in object storage."). Restore, }, @@ -187,8 +187,8 @@ func TestProcessRestore(t *testing.T) { expectedErr: false, backupServiceGetBackupError: errors.New("no backup here"), expectedRestoreUpdates: []*api.Restore{ - NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseInProgress).Restore, - NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseCompleted). + NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore, + NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseCompleted). WithErrors(api.RestoreResult{ Ark: []string{"no backup here"}, }). @@ -202,8 +202,8 @@ func TestProcessRestore(t *testing.T) { restorerError: errors.New("blarg"), expectedErr: false, expectedRestoreUpdates: []*api.Restore{ - NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseInProgress).Restore, - NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseCompleted). + NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore, + NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseCompleted). WithErrors(api.RestoreResult{ Namespaces: map[string][]string{ "ns-1": {"blarg"}, @@ -211,7 +211,7 @@ func TestProcessRestore(t *testing.T) { }). 
Restore, }, - expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseInProgress).Restore, + expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore, }, { name: "valid restore gets executed", @@ -219,21 +219,10 @@ func TestProcessRestore(t *testing.T) { backup: NewTestBackup().WithName("backup-1").Backup, expectedErr: false, expectedRestoreUpdates: []*api.Restore{ - NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseInProgress).Restore, - NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseCompleted).Restore, + NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore, + NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseCompleted).Restore, }, - expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseInProgress).Restore, - }, - { - name: "restore with no restorable namespaces gets defaulted to *", - restore: NewRestore("foo", "bar", "backup-1", "", "", api.RestorePhaseNew).Restore, - backup: NewTestBackup().WithName("backup-1").Backup, - expectedErr: false, - expectedRestoreUpdates: []*api.Restore{ - NewRestore("foo", "bar", "backup-1", "*", "*", api.RestorePhaseInProgress).Restore, - NewRestore("foo", "bar", "backup-1", "*", "*", api.RestorePhaseCompleted).Restore, - }, - expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "*", "*", api.RestorePhaseInProgress).Restore, + expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore, }, { name: "valid restore with RestorePVs=true gets executed when allowRestoreSnapshots=true", @@ -242,10 +231,10 @@ func TestProcessRestore(t *testing.T) { allowRestoreSnapshots: true, expectedErr: false, expectedRestoreUpdates: []*api.Restore{ - NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseInProgress).WithRestorePVs(true).Restore, - NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseCompleted).WithRestorePVs(true).Restore, + NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).WithRestorePVs(true).Restore, + NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseCompleted).WithRestorePVs(true).Restore, }, - expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseInProgress).WithRestorePVs(true).Restore, + expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).WithRestorePVs(true).Restore, }, { name: "restore with RestorePVs=true fails validation when allowRestoreSnapshots=false", @@ -253,7 +242,7 @@ func TestProcessRestore(t *testing.T) { backup: NewTestBackup().WithName("backup-1").Backup, expectedErr: false, expectedRestoreUpdates: []*api.Restore{ - NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseFailedValidation). + NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseFailedValidation). WithRestorePVs(true). WithValidationError("Server is not configured for PV snapshot restores"). 
Restore, diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index f32077db5..29e357ef3 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -442,7 +442,7 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a } var err error - resourceClient, err = ctx.dynamicFactory.ClientForGroupVersionKind(obj.GroupVersionKind(), resource, namespace) + resourceClient, err = ctx.dynamicFactory.ClientForGroupVersionResource(obj.GroupVersionKind().GroupVersion(), resource, namespace) if err != nil { addArkError(&errs, fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err)) return warnings, errs diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index d207e0a99..184e741f9 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -405,8 +405,8 @@ func TestRestoreResourceForNamespace(t *testing.T) { dynamicFactory := &FakeDynamicFactory{} resource := metav1.APIResource{Name: "configmaps", Namespaced: true} - gvk := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"} - dynamicFactory.On("ClientForGroupVersionKind", gvk, resource, test.namespace).Return(resourceClient, nil) + gv := schema.GroupVersion{Group: "", Version: "v1"} + dynamicFactory.On("ClientForGroupVersionResource", gv, resource, test.namespace).Return(resourceClient, nil) log, _ := testlogger.NewNullLogger() diff --git a/pkg/util/collections/includes_excludes.go b/pkg/util/collections/includes_excludes.go index 668a2193a..0f0009c96 100644 --- a/pkg/util/collections/includes_excludes.go +++ b/pkg/util/collections/includes_excludes.go @@ -17,6 +17,8 @@ limitations under the License. package collections import ( + "strings" + "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/sets" @@ -70,13 +72,33 @@ func (ie *IncludesExcludes) ShouldInclude(s string) bool { return false } - return ie.includes.Has("*") || ie.includes.Has(s) + // len=0 means include everything + return ie.includes.Len() == 0 || ie.includes.Has("*") || ie.includes.Has(s) } -// IncludeEverything returns true if the Includes list is '*' -// and the Excludes list is empty, or false otherwise. +// IncludesString returns a string containing all of the includes, separated by commas, or * if the +// list is empty. +func (ie *IncludesExcludes) IncludesString() string { + return asString(ie.GetIncludes()) +} + +// ExcludesString returns a string containing all of the excludes, separated by commas, or * if the +// list is empty. +func (ie *IncludesExcludes) ExcludesString() string { + return asString(ie.GetExcludes()) +} + +func asString(in []string) string { + if len(in) == 0 { + return "*" + } + return strings.Join(in, ", ") +} + +// IncludeEverything returns true if the includes list is empty or '*' +// and the excludes list is empty, or false otherwise. func (ie *IncludesExcludes) IncludeEverything() bool { - return ie.excludes.Len() == 0 && ie.includes.Len() == 1 && ie.includes.Has("*") + return ie.excludes.Len() == 0 && (ie.includes.Len() == 0 || (ie.includes.Len() == 1 && ie.includes.Has("*"))) } // ValidateIncludesExcludes checks provided lists of included and excluded @@ -91,10 +113,6 @@ func ValidateIncludesExcludes(includesList, excludesList []string) []error { includes := sets.NewString(includesList...) excludes := sets.NewString(excludesList...) 
- if includes.Len() == 0 { - errs = append(errs, errors.New("includes list cannot be empty")) - } - if includes.Len() > 1 && includes.Has("*") { errs = append(errs, errors.New("includes list must either contain '*' only, or a non-empty list of items")) } diff --git a/pkg/util/collections/includes_excludes_test.go b/pkg/util/collections/includes_excludes_test.go index bc87746b5..d51c4524b 100644 --- a/pkg/util/collections/includes_excludes_test.go +++ b/pkg/util/collections/includes_excludes_test.go @@ -34,9 +34,9 @@ func TestShouldInclude(t *testing.T) { should bool }{ { - name: "empty - don't include anything", + name: "empty - include everything", check: "foo", - should: false, + should: true, }, { name: "include *", @@ -97,9 +97,8 @@ func TestValidateIncludesExcludes(t *testing.T) { expected []error }{ { - name: "include nothing not allowed", + name: "empty includes (everything) is allowed", includes: []string{}, - expected: []error{errors.New("includes list cannot be empty")}, }, { name: "include everything", diff --git a/pkg/util/kube/utils.go b/pkg/util/kube/utils.go index 5a7171cf5..74ad9120b 100644 --- a/pkg/util/kube/utils.go +++ b/pkg/util/kube/utils.go @@ -32,12 +32,10 @@ import ( ) // NamespaceAndName returns a string in the format namespace/name -func NamespaceAndName(metaAccessor metav1.ObjectMetaAccessor) string { - objMeta := metaAccessor.GetObjectMeta() - if objMeta == nil { - return "" +func NamespaceAndName(objMeta metav1.Object) string { + if objMeta.GetNamespace() == "" { + return objMeta.GetName() } - return fmt.Sprintf("%s/%s", objMeta.GetNamespace(), objMeta.GetName()) } diff --git a/pkg/util/test/fake_dynamic.go b/pkg/util/test/fake_dynamic.go index f4ee1e6fb..1c2b455d3 100644 --- a/pkg/util/test/fake_dynamic.go +++ b/pkg/util/test/fake_dynamic.go @@ -34,13 +34,8 @@ type FakeDynamicFactory struct { var _ client.DynamicFactory = &FakeDynamicFactory{} -func (df *FakeDynamicFactory) ClientForGroupVersionResource(gvr schema.GroupVersionResource, resource metav1.APIResource, namespace string) (client.Dynamic, error) { - args := df.Called(gvr, resource, namespace) - return args.Get(0).(client.Dynamic), args.Error(1) -} - -func (df *FakeDynamicFactory) ClientForGroupVersionKind(gvk schema.GroupVersionKind, resource metav1.APIResource, namespace string) (client.Dynamic, error) { - args := df.Called(gvk, resource, namespace) +func (df *FakeDynamicFactory) ClientForGroupVersionResource(gv schema.GroupVersion, resource metav1.APIResource, namespace string) (client.Dynamic, error) { + args := df.Called(gv, resource, namespace) return args.Get(0).(client.Dynamic), args.Error(1) } diff --git a/pkg/util/test/fake_mapper.go b/pkg/util/test/fake_mapper.go index e12dc7eb1..8fff86b6f 100644 --- a/pkg/util/test/fake_mapper.go +++ b/pkg/util/test/fake_mapper.go @@ -17,8 +17,7 @@ limitations under the License.
package test import ( - "errors" - + "github.com/pkg/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -38,12 +37,12 @@ func (m *FakeMapper) ResourceFor(input schema.GroupVersionResource) (schema.Grou }, nil } if m.Resources == nil { - return schema.GroupVersionResource{}, errors.New("invalid resource") + return schema.GroupVersionResource{}, errors.Errorf("invalid resource %q", input.String()) } if gr, found := m.Resources[input]; found { return gr, nil } - return schema.GroupVersionResource{}, errors.New("invalid resource") + return schema.GroupVersionResource{}, errors.Errorf("invalid resource %q", input.String()) } diff --git a/pkg/util/test/test_logger.go b/pkg/util/test/test_logger.go new file mode 100644 index 000000000..fc963698d --- /dev/null +++ b/pkg/util/test/test_logger.go @@ -0,0 +1,29 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "io/ioutil" + + "github.com/sirupsen/logrus" +) + +func NewLogger() *logrus.Entry { + logger := logrus.New() + logger.Out = ioutil.Discard + return logrus.NewEntry(logger) +}
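The removal of the "*" defaulting in the backup and restore controllers leans on the IncludesExcludes change above: an empty includes list now validates and behaves as "include everything". A rough sketch of the new semantics follows; it assumes the package's existing NewIncludesExcludes and Excludes builder helpers, which are not shown in this diff.

package example

import (
	"fmt"

	"github.com/heptio/ark/pkg/util/collections"
)

func emptyIncludesMeansEverything() {
	// Validation no longer rejects an empty includes list.
	if errs := collections.ValidateIncludesExcludes(nil, []string{"kube-system"}); len(errs) > 0 {
		fmt.Println("unexpected validation errors:", errs)
	}

	// No includes specified, one namespace excluded.
	ie := collections.NewIncludesExcludes().Excludes("kube-system")

	fmt.Println(ie.ShouldInclude("ns-1"))        // true: empty includes means everything is included
	fmt.Println(ie.ShouldInclude("kube-system")) // false: explicitly excluded
	fmt.Println(ie.IncludesString())             // *: an empty includes list renders as "*"
	fmt.Println(ie.ExcludesString())             // kube-system
}

This is the behavior the updated "empty - include everything" test case above asserts, and it is why the controllers no longer need to rewrite unset spec fields to "*" before processing.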