diff --git a/CHANGELOG.md b/CHANGELOG.md
index a473b8ff6..7eac68771 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,15 @@
 # Changelog
 
+#### [v0.9.7](https://github.com/heptio/ark/releases/tag/v0.9.7) - 2018-10-04
+
+#### Bug Fixes
+  * Preserve explicitly-specified node ports during restore (#712, @timoreimann)
+  * Enable restoring resources with ownerReference set (#837, @mwieczorek)
+  * Fix error when restoring ExternalName services (#869, @shubheksha)
+  * Remove restore log helper for accurate line numbers (#891, @skriss)
+  * Display backup StartTimestamp in `ark backup get` output (#894, @marctc)
+  * Fix restic restores when using namespace mappings (#900, @skriss)
+
 #### [v0.9.6](https://github.com/heptio/ark/releases/tag/v0.9.6) - 2018-09-21
 
 #### Bug Fixes
diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go
index 2eebbb936..c631dbecc 100644
--- a/pkg/cmd/server/server.go
+++ b/pkg/cmd/server/server.go
@@ -381,6 +381,7 @@ var defaultResourcePriorities = []string{
 	"serviceaccounts",
 	"limitranges",
 	"pods",
+	"replicasets",
 }
 
 func applyConfigDefaults(c *api.Config, logger logrus.FieldLogger) {
diff --git a/pkg/cmd/util/output/backup_printer.go b/pkg/cmd/util/output/backup_printer.go
index 17153d223..aa9b05b43 100644
--- a/pkg/cmd/util/output/backup_printer.go
+++ b/pkg/cmd/util/output/backup_printer.go
@@ -92,7 +92,7 @@ func printBackup(backup *v1.Backup, w io.Writer, options printers.PrintOptions)
 		status = "Deleting"
 	}
 
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s", name, status, backup.CreationTimestamp.Time, humanReadableTimeFromNow(expiration), metav1.FormatLabelSelector(backup.Spec.LabelSelector)); err != nil {
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s", name, status, backup.Status.StartTimestamp.Time, humanReadableTimeFromNow(expiration), metav1.FormatLabelSelector(backup.Spec.LabelSelector)); err != nil {
 		return err
 	}
 
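Note on the server.go hunk above: defaultResourcePriorities determines the order in which resource types are restored, so ReplicaSets now come back before the workloads that depend on them. Entries in this list appear to need the lowercase plural resource name reported by API discovery ("replicasets"; a singular "replicaset" would match nothing), which is why the new entry is written in the plural. The sketch below illustrates the prioritization idea only; prioritize is a hypothetical helper, not code from this changeset.

```go
package main

import (
	"fmt"
	"sort"
)

// prioritize returns the discovered resources with prioritized names first,
// in the order given, and everything else after them alphabetically.
func prioritize(priorities, discovered []string) []string {
	known := map[string]bool{}
	for _, r := range discovered {
		known[r] = true
	}

	result := []string{}
	taken := map[string]bool{}
	for _, p := range priorities {
		if known[p] { // an entry that matches no discovered resource is ignored
			result = append(result, p)
			taken[p] = true
		}
	}

	rest := []string{}
	for _, r := range discovered {
		if !taken[r] {
			rest = append(rest, r)
		}
	}
	sort.Strings(rest)
	return append(result, rest...)
}

func main() {
	fmt.Println(prioritize(
		[]string{"namespaces", "pods", "replicasets"},
		[]string{"deployments", "replicasets", "namespaces", "pods"},
	))
	// Output: [namespaces pods replicasets deployments]
}
```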
diff --git a/pkg/restic/restorer.go b/pkg/restic/restorer.go
index 6d538b735..a618390bd 100644
--- a/pkg/restic/restorer.go
+++ b/pkg/restic/restorer.go
@@ -34,7 +34,7 @@ import (
 // Restorer can execute restic restores of volumes in a pod.
 type Restorer interface {
 	// RestorePodVolumes restores all annotated volumes in a pod.
-	RestorePodVolumes(restore *arkv1api.Restore, pod *corev1api.Pod, log logrus.FieldLogger) []error
+	RestorePodVolumes(restore *arkv1api.Restore, pod *corev1api.Pod, sourceNamespace string, log logrus.FieldLogger) []error
 }
 
 type restorer struct {
@@ -84,14 +84,14 @@ func newRestorer(
 	return r
 }
 
-func (r *restorer) RestorePodVolumes(restore *arkv1api.Restore, pod *corev1api.Pod, log logrus.FieldLogger) []error {
+func (r *restorer) RestorePodVolumes(restore *arkv1api.Restore, pod *corev1api.Pod, sourceNamespace string, log logrus.FieldLogger) []error {
 	// get volumes to restore from pod's annotations
 	volumesToRestore := GetPodSnapshotAnnotations(pod)
 	if len(volumesToRestore) == 0 {
 		return nil
 	}
 
-	repo, err := r.repoEnsurer.EnsureRepo(r.ctx, restore.Namespace, pod.Namespace)
+	repo, err := r.repoEnsurer.EnsureRepo(r.ctx, restore.Namespace, sourceNamespace)
 	if err != nil {
 		return []error{err}
 	}
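The new sourceNamespace parameter is the core of the namespace-mapping fix (#900): during a remapped restore, pod.Namespace already holds the target namespace, while the restic repository was created for the namespace that was backed up, so the repository lookup has to use the original name. A minimal, self-contained illustration with stand-in types (not Ark's real repoEnsurer):

```go
package main

import "fmt"

// repoEnsurer is a stand-in for illustration only: repositories are keyed
// by the namespace that was backed up.
type repoEnsurer struct {
	repos map[string]string
}

func (e *repoEnsurer) EnsureRepo(sourceNamespace string) (string, error) {
	if r, ok := e.repos[sourceNamespace]; ok {
		return r, nil
	}
	return "", fmt.Errorf("no restic repository for namespace %q", sourceNamespace)
}

func main() {
	e := &repoEnsurer{repos: map[string]string{"prod": "s3:bucket/restic/prod"}}

	// A restore mapping "prod" -> "staging" rewrites the pod's namespace.
	podNamespace := "staging"
	sourceNamespace := "prod"

	if _, err := e.EnsureRepo(podNamespace); err != nil {
		fmt.Println("old behavior:", err) // lookup by the remapped namespace fails
	}
	repo, _ := e.EnsureRepo(sourceNamespace)
	fmt.Println("fixed behavior:", repo) // lookup by the original namespace succeeds
}
```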
- ctx.logger.Debugf("Waiting on resource wait group for resource=%s", resource.String()) + ctx.log.Debugf("Waiting on resource wait group for resource=%s", resource.String()) ctx.resourceWaitGroup.Wait() - ctx.logger.Debugf("Done waiting on resource wait group for resource=%s", resource.String()) + ctx.log.Debugf("Done waiting on resource wait group for resource=%s", resource.String()) } // TODO timeout? - ctx.logger.Debug("Waiting on global wait group") + ctx.log.Debug("Waiting on global wait group") waitErrs := ctx.globalWaitGroup.Wait() - ctx.logger.Debug("Done waiting on global wait group") + ctx.log.Debug("Done waiting on global wait group") for _, err := range waitErrs { // TODO not ideal to be adding these to Ark-level errors @@ -579,14 +575,14 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a warnings, errs := api.RestoreResult{}, api.RestoreResult{} if ctx.restore.Spec.IncludeClusterResources != nil && !*ctx.restore.Spec.IncludeClusterResources && namespace == "" { - ctx.infof("Skipping resource %s because it's cluster-scoped", resource) + ctx.log.Infof("Skipping resource %s because it's cluster-scoped", resource) return warnings, errs } if namespace != "" { - ctx.infof("Restoring resource '%s' into namespace '%s' from: %s", resource, namespace, resourcePath) + ctx.log.Infof("Restoring resource '%s' into namespace '%s' from: %s", resource, namespace, resourcePath) } else { - ctx.infof("Restoring cluster level resource '%s' from: %s", resource, resourcePath) + ctx.log.Infof("Restoring cluster level resource '%s' from: %s", resource, resourcePath) } files, err := ctx.fileSystem.ReadDir(resourcePath) @@ -631,29 +627,20 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a continue } - if hasControllerOwner(obj.GetOwnerReferences()) { - // non-pods with controller owners shouldn't be restored; pods with controller - // owners should only be restored if they have restic snapshots to restore - if groupResource != kuberesource.Pods || !restic.PodHasSnapshotAnnotation(obj) { - ctx.infof("%s has a controller owner - skipping", kube.NamespaceAndName(obj)) - continue - } - } - complete, err := isCompleted(obj, groupResource) if err != nil { addToResult(&errs, namespace, fmt.Errorf("error checking completion %q: %v", fullPath, err)) continue } if complete { - ctx.infof("%s is complete - skipping", kube.NamespaceAndName(obj)) + ctx.log.Infof("%s is complete - skipping", kube.NamespaceAndName(obj)) continue } if resourceClient == nil { // initialize client for this Resource. we need // metadata from an object to do this. 
- ctx.infof("Getting client for %v", obj.GroupVersionKind()) + ctx.log.Infof("Getting client for %v", obj.GroupVersionKind()) resource := metav1.APIResource{ Namespaced: len(namespace) > 0, @@ -672,7 +659,7 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a // TODO: move to restore item action if/when we add a ShouldRestore() method to the interface if groupResource == kuberesource.Pods && obj.GetAnnotations()[v1.MirrorPodAnnotationKey] != "" { - ctx.infof("Not restoring pod because it's a mirror pod") + ctx.log.Infof("Not restoring pod because it's a mirror pod") continue } @@ -680,7 +667,7 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a _, found := ctx.backup.Status.VolumeBackups[name] reclaimPolicy, err := collections.GetString(obj.Object, "spec.persistentVolumeReclaimPolicy") if err == nil && !found && reclaimPolicy == "Delete" { - ctx.infof("Not restoring PV because it doesn't have a snapshot and its reclaim policy is Delete.") + ctx.log.Infof("Not restoring PV because it doesn't have a snapshot and its reclaim policy is Delete.") ctx.pvsToProvision.Insert(name) @@ -706,8 +693,8 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a go func() { defer ctx.resourceWaitGroup.Done() - if _, err := waitForReady(resourceWatch.ResultChan(), name, isPVReady, time.Minute, ctx.logger); err != nil { - ctx.logger.Warnf("Timeout reached waiting for persistent volume %s to become ready", name) + if _, err := waitForReady(resourceWatch.ResultChan(), name, isPVReady, time.Minute, ctx.log); err != nil { + ctx.log.Warnf("Timeout reached waiting for persistent volume %s to become ready", name) addArkError(&warnings, fmt.Errorf("timeout reached waiting for persistent volume %s to become ready", name)) } }() @@ -722,7 +709,7 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a } if volumeName, exists := spec["volumeName"]; exists && ctx.pvsToProvision.Has(volumeName.(string)) { - ctx.infof("Resetting PersistentVolumeClaim %s/%s for dynamic provisioning because its PV %v has a reclaim policy of Delete", namespace, name, volumeName) + ctx.log.Infof("Resetting PersistentVolumeClaim %s/%s for dynamic provisioning because its PV %v has a reclaim policy of Delete", namespace, name, volumeName) delete(spec, "volumeName") @@ -738,10 +725,10 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a continue } - ctx.infof("Executing item action for %v", &groupResource) + ctx.log.Infof("Executing item action for %v", &groupResource) if logSetter, ok := action.ItemAction.(logging.LogSetter); ok { - logSetter.SetLog(ctx.logger) + logSetter.SetLog(ctx.log) } updatedObj, warning, err := action.Execute(obj, ctx.restore) @@ -770,6 +757,7 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a // necessary because we may have remapped the namespace // if the namespace is blank, don't create the key + originalNamespace := obj.GetNamespace() if namespace != "" { obj.SetNamespace(namespace) } @@ -777,19 +765,19 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a // add an ark-restore label to each resource for easy ID addLabel(obj, api.RestoreLabelKey, ctx.restore.Name) - ctx.infof("Restoring %s: %v", obj.GroupVersionKind().Kind, name) + ctx.log.Infof("Restoring %s: %v", obj.GroupVersionKind().Kind, name) createdObj, restoreErr := resourceClient.Create(obj) if apierrors.IsAlreadyExists(restoreErr) { fromCluster, 
@@ -777,19 +765,19 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a
 		// add an ark-restore label to each resource for easy ID
 		addLabel(obj, api.RestoreLabelKey, ctx.restore.Name)
 
-		ctx.infof("Restoring %s: %v", obj.GroupVersionKind().Kind, name)
+		ctx.log.Infof("Restoring %s: %v", obj.GroupVersionKind().Kind, name)
 		createdObj, restoreErr := resourceClient.Create(obj)
 		if apierrors.IsAlreadyExists(restoreErr) {
 			fromCluster, err := resourceClient.Get(name, metav1.GetOptions{})
 			if err != nil {
-				ctx.infof("Error retrieving cluster version of %s: %v", kube.NamespaceAndName(obj), err)
+				ctx.log.Infof("Error retrieving cluster version of %s: %v", kube.NamespaceAndName(obj), err)
 				addToResult(&warnings, namespace, err)
 				continue
 			}
 			// Remove insubstantial metadata
 			fromCluster, err = resetMetadataAndStatus(fromCluster)
 			if err != nil {
-				ctx.infof("Error trying to reset metadata for %s: %v", kube.NamespaceAndName(obj), err)
+				ctx.log.Infof("Error trying to reset metadata for %s: %v", kube.NamespaceAndName(obj), err)
 				addToResult(&warnings, namespace, err)
 				continue
 			}
@@ -804,14 +792,14 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a
 			case kuberesource.ServiceAccounts:
 				desired, err := mergeServiceAccounts(fromCluster, obj)
 				if err != nil {
-					ctx.infof("error merging secrets for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
+					ctx.log.Infof("error merging secrets for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
 					addToResult(&warnings, namespace, err)
 					continue
 				}
 
 				patchBytes, err := generatePatch(fromCluster, desired)
 				if err != nil {
-					ctx.infof("error generating patch for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
+					ctx.log.Infof("error generating patch for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
 					addToResult(&warnings, namespace, err)
 					continue
 				}
@@ -825,7 +813,7 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a
 				if err != nil {
 					addToResult(&warnings, namespace, err)
 				} else {
-					ctx.infof("ServiceAccount %s successfully updated", kube.NamespaceAndName(obj))
+					ctx.log.Infof("ServiceAccount %s successfully updated", kube.NamespaceAndName(obj))
 				}
 			default:
 				e := errors.Errorf("not restored: %s and is different from backed up version.", restoreErr)
@@ -836,24 +824,24 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a
 		}
 
 		// Error was something other than an AlreadyExists
 		if restoreErr != nil {
-			ctx.infof("error restoring %s: %v", name, err)
+			ctx.log.Infof("error restoring %s: %v", name, restoreErr)
 			addToResult(&errs, namespace, fmt.Errorf("error restoring %s: %v", fullPath, restoreErr))
 			continue
 		}
 
 		if groupResource == kuberesource.Pods && len(restic.GetPodSnapshotAnnotations(obj)) > 0 {
 			if ctx.resticRestorer == nil {
-				ctx.logger.Warn("No restic restorer, not restoring pod's volumes")
+				ctx.log.Warn("No restic restorer, not restoring pod's volumes")
 			} else {
 				ctx.globalWaitGroup.GoErrorSlice(func() []error {
 					pod := new(v1.Pod)
 					if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil {
-						ctx.logger.WithError(err).Error("error converting unstructured pod")
+						ctx.log.WithError(err).Error("error converting unstructured pod")
 						return []error{err}
 					}
 
-					if errs := ctx.resticRestorer.RestorePodVolumes(ctx.restore, pod, ctx.logger); errs != nil {
-						ctx.logger.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully complete restic restores of pod's volumes")
+					if errs := ctx.resticRestorer.RestorePodVolumes(ctx.restore, pod, originalNamespace, ctx.log); errs != nil {
+						ctx.log.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully complete restic restores of pod's volumes")
 						return errs
 					}
 
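generatePatch itself is not shown in this diff; assuming it computes a two-way patch between the live ServiceAccount and the merged desired state, a JSON merge patch produces the same kind of result. A sketch using the evanphx/json-patch library, with invented sample objects:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// fromCluster: the ServiceAccount currently in the cluster.
	fromCluster := []byte(`{"metadata":{"name":"default"},"secrets":[{"name":"default-token-abcde"}]}`)
	// desired: the cluster object with the backed-up secrets merged in.
	desired := []byte(`{"metadata":{"name":"default"},"secrets":[{"name":"default-token-abcde"},{"name":"backed-up-token"}]}`)

	// CreateMergePatch emits only the fields that differ.
	patch, err := jsonpatch.CreateMergePatch(fromCluster, desired)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"secrets":[...both entries...]}
}
```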
ctx.infof("error creating gzip reader: %v", err) + ctx.log.Infof("error creating gzip reader: %v", err) return "", err } defer gzr.Close() @@ -1097,7 +1085,7 @@ func (ctx *context) unzipAndExtractBackup(src io.Reader) (string, error) { func (ctx *context) readBackup(tarRdr *tar.Reader) (string, error) { dir, err := ctx.fileSystem.TempDir("", "") if err != nil { - ctx.infof("error creating temp dir: %v", err) + ctx.log.Infof("error creating temp dir: %v", err) return "", err } @@ -1108,7 +1096,7 @@ func (ctx *context) readBackup(tarRdr *tar.Reader) (string, error) { break } if err != nil { - ctx.infof("error reading tar: %v", err) + ctx.log.Infof("error reading tar: %v", err) return "", err } @@ -1118,7 +1106,7 @@ func (ctx *context) readBackup(tarRdr *tar.Reader) (string, error) { case tar.TypeDir: err := ctx.fileSystem.MkdirAll(target, header.FileInfo().Mode()) if err != nil { - ctx.infof("mkdirall error: %v", err) + ctx.log.Infof("mkdirall error: %v", err) return "", err } @@ -1126,7 +1114,7 @@ func (ctx *context) readBackup(tarRdr *tar.Reader) (string, error) { // make sure we have the directory created err := ctx.fileSystem.MkdirAll(filepath.Dir(target), header.FileInfo().Mode()) if err != nil { - ctx.infof("mkdirall error: %v", err) + ctx.log.Infof("mkdirall error: %v", err) return "", err } @@ -1138,7 +1126,7 @@ func (ctx *context) readBackup(tarRdr *tar.Reader) (string, error) { defer file.Close() if _, err := io.Copy(file, tarRdr); err != nil { - ctx.infof("error copying: %v", err) + ctx.log.Infof("error copying: %v", err) return "", err } } diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index a374888d7..8fdbac361 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -189,7 +189,7 @@ func TestRestoreNamespaceFiltering(t *testing.T) { restore: test.restore, namespaceClient: &fakeNamespaceClient{}, fileSystem: test.fileSystem, - logger: log, + log: log, prioritizedResources: test.prioritizedResources, } @@ -282,7 +282,7 @@ func TestRestorePriority(t *testing.T) { namespaceClient: &fakeNamespaceClient{}, fileSystem: test.fileSystem, prioritizedResources: test.prioritizedResources, - logger: log, + log: log, } warnings, errors := ctx.restoreFromDir(test.baseDir) @@ -330,7 +330,7 @@ func TestNamespaceRemapping(t *testing.T) { prioritizedResources: prioritizedResources, restore: restore, backup: &api.Backup{}, - logger: arktest.NewLogger(), + log: arktest.NewLogger(), } warnings, errors := ctx.restoreFromDir(baseDir) @@ -432,16 +432,6 @@ func TestRestoreResourceForNamespace(t *testing.T) { labelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"foo": "not-bar"})), fileSystem: arktest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().WithLabels(map[string]string{"foo": "bar"}).ToJSON()), }, - { - name: "items with controller owner are skipped", - namespace: "ns-1", - resourcePath: "configmaps", - labelSelector: labels.NewSelector(), - fileSystem: arktest.NewFakeFileSystem(). - WithFile("configmaps/cm-1.json", newTestConfigMap().WithControllerOwner().ToJSON()). 
- WithFile("configmaps/cm-2.json", newNamedTestConfigMap("cm-2").ToJSON()), - expectedObjs: toUnstructured(newNamedTestConfigMap("cm-2").WithArkLabel("my-restore").ConfigMap), - }, { name: "namespace is remapped", namespace: "ns-2", @@ -628,7 +618,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { }, }, backup: &api.Backup{}, - logger: arktest.NewLogger(), + log: arktest.NewLogger(), pvRestorer: &pvRestorer{}, } @@ -714,7 +704,7 @@ func TestRestoringExistingServiceAccount(t *testing.T) { }, }, backup: &api.Backup{}, - logger: arktest.NewLogger(), + log: arktest.NewLogger(), } warnings, errors := ctx.restoreResource("serviceaccounts", "ns-1", "foo/resources/serviceaccounts/namespaces/ns-1/") @@ -900,7 +890,7 @@ status: }, }, backup: backup, - logger: arktest.NewLogger(), + log: arktest.NewLogger(), pvsToProvision: sets.NewString(), pvRestorer: pvRestorer, } @@ -1404,9 +1394,18 @@ func (obj *testUnstructured) WithStatusField(field string, value interface{}) *t } func (obj *testUnstructured) WithAnnotations(fields ...string) *testUnstructured { - annotations := make(map[string]interface{}) + vals := map[string]string{} for _, field := range fields { - annotations[field] = "foo" + vals[field] = "foo" + } + + return obj.WithAnnotationValues(vals) +} + +func (obj *testUnstructured) WithAnnotationValues(fieldVals map[string]string) *testUnstructured { + annotations := make(map[string]interface{}) + for field, val := range fieldVals { + annotations[field] = val } obj = obj.WithMetadataField("annotations", annotations) diff --git a/pkg/restore/service_action.go b/pkg/restore/service_action.go index 3f48b91de..9395a0fe1 100644 --- a/pkg/restore/service_action.go +++ b/pkg/restore/service_action.go @@ -17,14 +17,21 @@ limitations under the License. package restore import ( + "encoding/json" + + "github.com/pkg/errors" "github.com/sirupsen/logrus" + corev1api "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" api "github.com/heptio/ark/pkg/apis/ark/v1" "github.com/heptio/ark/pkg/util/collections" ) +const annotationLastAppliedConfig = "kubectl.kubernetes.io/last-applied-configuration" + type serviceAction struct { log logrus.FieldLogger } @@ -52,15 +59,55 @@ func (a *serviceAction) Execute(obj runtime.Unstructured, restore *api.Restore) delete(spec, "clusterIP") } - ports, err := collections.GetSlice(obj.UnstructuredContent(), "spec.ports") + err = deleteNodePorts(obj, &spec) if err != nil { return nil, nil, err } + return obj, nil, nil +} + +func getPreservedPorts(obj runtime.Unstructured) (map[string]bool, error) { + preservedPorts := map[string]bool{} + metadata, err := meta.Accessor(obj) + if err != nil { + return nil, errors.WithStack(err) + } + if lac, ok := metadata.GetAnnotations()[annotationLastAppliedConfig]; ok { + var svc corev1api.Service + if err := json.Unmarshal([]byte(lac), &svc); err != nil { + return nil, errors.WithStack(err) + } + for _, port := range svc.Spec.Ports { + if port.NodePort > 0 { + preservedPorts[port.Name] = true + } + } + } + return preservedPorts, nil +} + +func deleteNodePorts(obj runtime.Unstructured, spec *map[string]interface{}) error { + preservedPorts, err := getPreservedPorts(obj) + if err != nil { + return err + } + + ports, err := collections.GetSlice(obj.UnstructuredContent(), "spec.ports") + serviceType, _ := collections.GetString(*spec, "type") + if err != nil && serviceType != "ExternalName" { + return err + } for _, port := range ports { p := port.(map[string]interface{}) + var name string + if nameVal, 
ok := p["name"]; ok { + name = nameVal.(string) + } + if preservedPorts[name] { + continue + } delete(p, "nodePort") } - - return obj, nil, nil + return nil } diff --git a/pkg/restore/service_action_test.go b/pkg/restore/service_action_test.go index 3e6ed173b..7b9198756 100644 --- a/pkg/restore/service_action_test.go +++ b/pkg/restore/service_action_test.go @@ -17,15 +17,33 @@ limitations under the License. package restore import ( + "encoding/json" "testing" arktest "github.com/heptio/ark/pkg/util/test" "github.com/stretchr/testify/assert" + corev1api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" ) +func svcJSON(ports ...corev1api.ServicePort) string { + svc := corev1api.Service{ + Spec: corev1api.ServiceSpec{ + Ports: ports, + }, + } + + data, err := json.Marshal(svc) + if err != nil { + panic(err) + } + + return string(data) +} + func TestServiceActionExecute(t *testing.T) { + tests := []struct { name string obj runtime.Unstructured @@ -37,6 +55,11 @@ func TestServiceActionExecute(t *testing.T) { obj: NewTestUnstructured().WithName("svc-1").Unstructured, expectedErr: true, }, + { + name: "no spec ports should error", + obj: NewTestUnstructured().WithName("svc-1").WithSpec().Unstructured, + expectedErr: true, + }, { name: "clusterIP (only) should be deleted from spec", obj: NewTestUnstructured().WithName("svc-1").WithSpec("clusterIP", "foo").WithSpecField("ports", []interface{}{}).Unstructured, @@ -63,6 +86,97 @@ func TestServiceActionExecute(t *testing.T) { map[string]interface{}{"foo": "bar"}, }).Unstructured, }, + { + name: "unnamed nodePort should be deleted when missing in annotation", + obj: NewTestUnstructured().WithName("svc-1"). + WithAnnotationValues(map[string]string{ + annotationLastAppliedConfig: svcJSON(), + }). + WithSpecField("ports", []interface{}{ + map[string]interface{}{"nodePort": 8080}, + }).Unstructured, + expectedErr: false, + expectedRes: NewTestUnstructured().WithName("svc-1"). + WithAnnotationValues(map[string]string{ + annotationLastAppliedConfig: svcJSON(), + }). + WithSpecField("ports", []interface{}{ + map[string]interface{}{}, + }).Unstructured, + }, + { + name: "unnamed nodePort should be preserved when specified in annotation", + obj: NewTestUnstructured().WithName("svc-1"). + WithAnnotationValues(map[string]string{ + annotationLastAppliedConfig: svcJSON(corev1api.ServicePort{NodePort: 8080}), + }). + WithSpecField("ports", []interface{}{ + map[string]interface{}{ + "nodePort": 8080, + }, + }).Unstructured, + expectedErr: false, + expectedRes: NewTestUnstructured().WithName("svc-1"). + WithAnnotationValues(map[string]string{ + annotationLastAppliedConfig: svcJSON(corev1api.ServicePort{NodePort: 8080}), + }). + WithSpecField("ports", []interface{}{ + map[string]interface{}{ + "nodePort": 8080, + }, + }).Unstructured, + }, + { + name: "unnamed nodePort should be deleted when named nodePort specified in annotation", + obj: NewTestUnstructured().WithName("svc-1"). + WithAnnotationValues(map[string]string{ + annotationLastAppliedConfig: svcJSON(corev1api.ServicePort{Name: "http", NodePort: 8080}), + }). + WithSpecField("ports", []interface{}{ + map[string]interface{}{ + "nodePort": 8080, + }, + }).Unstructured, + expectedErr: false, + expectedRes: NewTestUnstructured().WithName("svc-1"). + WithAnnotationValues(map[string]string{ + annotationLastAppliedConfig: svcJSON(corev1api.ServicePort{Name: "http", NodePort: 8080}), + }). 
+ WithSpecField("ports", []interface{}{ + map[string]interface{}{}, + }).Unstructured, + }, + { + name: "named nodePort should be preserved when specified in annotation", + obj: NewTestUnstructured().WithName("svc-1"). + WithAnnotationValues(map[string]string{ + annotationLastAppliedConfig: svcJSON(corev1api.ServicePort{Name: "http", NodePort: 8080}), + }). + WithSpecField("ports", []interface{}{ + map[string]interface{}{ + "name": "http", + "nodePort": 8080, + }, + map[string]interface{}{ + "name": "admin", + "nodePort": 9090, + }, + }).Unstructured, + expectedErr: false, + expectedRes: NewTestUnstructured().WithName("svc-1"). + WithAnnotationValues(map[string]string{ + annotationLastAppliedConfig: svcJSON(corev1api.ServicePort{Name: "http", NodePort: 8080}), + }). + WithSpecField("ports", []interface{}{ + map[string]interface{}{ + "name": "http", + "nodePort": 8080, + }, + map[string]interface{}{ + "name": "admin", + }, + }).Unstructured, + }, } for _, test := range tests {