update backup code to work with volume snapshot locations

Signed-off-by: Steve Kriss <steve@heptio.com>
This commit is contained in:
Steve Kriss
2018-09-26 16:18:45 -06:00
parent 4af89fa863
commit df07b7dc9f
13 changed files with 763 additions and 986 deletions

View File

@@ -18,24 +18,24 @@ package controller
import (
"bytes"
"encoding/json"
"fmt"
"io"
"sort"
"strings"
"testing"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
core "k8s.io/client-go/testing"
"github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/backup"
pkgbackup "github.com/heptio/ark/pkg/backup"
"github.com/heptio/ark/pkg/generated/clientset/versioned/fake"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
"github.com/heptio/ark/pkg/metrics"
@@ -43,7 +43,6 @@ import (
persistencemocks "github.com/heptio/ark/pkg/persistence/mocks"
"github.com/heptio/ark/pkg/plugin"
pluginmocks "github.com/heptio/ark/pkg/plugin/mocks"
"github.com/heptio/ark/pkg/util/collections"
"github.com/heptio/ark/pkg/util/logging"
arktest "github.com/heptio/ark/pkg/util/test"
)
@@ -52,384 +51,313 @@ type fakeBackupper struct {
mock.Mock
}
func (b *fakeBackupper) Backup(logger logrus.FieldLogger, backup *v1.Backup, backupFile io.Writer, actions []backup.ItemAction) error {
args := b.Called(logger, backup, backupFile, actions)
func (b *fakeBackupper) Backup(logger logrus.FieldLogger, backup *pkgbackup.Request, backupFile io.Writer, actions []pkgbackup.ItemAction, blockStoreGetter pkgbackup.BlockStoreGetter) error {
args := b.Called(logger, backup, backupFile, actions, blockStoreGetter)
return args.Error(0)
}
func TestProcessBackup(t *testing.T) {
func TestProcessBackupNonProcessedItems(t *testing.T) {
tests := []struct {
name string
key string
expectError bool
expectedIncludes []string
expectedExcludes []string
backup *arktest.TestBackup
expectBackup bool
allowSnapshots bool
defaultLocations map[string]string
name string
key string
backup *v1.Backup
expectedErr string
}{
{
name: "bad key",
name: "bad key returns error",
key: "bad/key/here",
expectError: true,
expectedErr: "error splitting queue key: unexpected key format: \"bad/key/here\"",
},
{
name: "lister failed",
key: "heptio-ark/backup1",
expectError: true,
name: "backup not found in lister returns error",
key: "nonexistent/backup",
expectedErr: "error getting backup: backup.ark.heptio.com \"backup\" not found",
},
{
name: "do not process phase FailedValidation",
key: "heptio-ark/backup1",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseFailedValidation),
expectBackup: false,
name: "FailedValidation backup is not processed",
key: "heptio-ark/backup-1",
backup: arktest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseFailedValidation).Backup,
},
{
name: "do not process phase InProgress",
key: "heptio-ark/backup1",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseInProgress),
expectBackup: false,
name: "InProgress backup is not processed",
key: "heptio-ark/backup-1",
backup: arktest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseInProgress).Backup,
},
{
name: "do not process phase Completed",
key: "heptio-ark/backup1",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseCompleted),
expectBackup: false,
name: "Completed backup is not processed",
key: "heptio-ark/backup-1",
backup: arktest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseCompleted).Backup,
},
{
name: "do not process phase Failed",
key: "heptio-ark/backup1",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseFailed),
expectBackup: false,
},
{
name: "do not process phase other",
key: "heptio-ark/backup1",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase("arg"),
expectBackup: false,
},
{
name: "invalid included/excluded resources fails validation",
key: "heptio-ark/backup1",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedResources("foo").WithExcludedResources("foo"),
expectBackup: false,
},
{
name: "invalid included/excluded namespaces fails validation",
key: "heptio-ark/backup1",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedNamespaces("foo").WithExcludedNamespaces("foo"),
expectBackup: false,
},
{
name: "make sure specified included and excluded resources are honored",
key: "heptio-ark/backup1",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedResources("i", "j").WithExcludedResources("k", "l"),
expectedIncludes: []string{"i", "j"},
expectedExcludes: []string{"k", "l"},
expectBackup: true,
},
{
name: "if includednamespaces are specified, don't default to *",
key: "heptio-ark/backup1",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedNamespaces("ns-1"),
expectBackup: true,
},
{
name: "ttl",
key: "heptio-ark/backup1",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithTTL(10 * time.Minute),
expectBackup: true,
},
{
name: "backup with SnapshotVolumes when allowSnapshots=false fails validation",
key: "heptio-ark/backup1",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithSnapshotVolumes(true),
expectBackup: false,
},
{
name: "backup with SnapshotVolumes when allowSnapshots=true gets executed",
key: "heptio-ark/backup1",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithSnapshotVolumes(true),
allowSnapshots: true,
expectBackup: true,
},
{
name: "Backup without a location will have it set to the default",
key: "heptio-ark/backup1",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew),
expectBackup: true,
},
{
name: "Backup with a location completes",
key: "heptio-ark/backup1",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithStorageLocation("loc1"),
expectBackup: true,
},
{
name: "Backup with non-existent location will fail validation",
key: "heptio-ark/backup1",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithStorageLocation("loc2"),
expectBackup: false,
name: "Failed backup is not processed",
key: "heptio-ark/backup-1",
backup: arktest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseFailed).Backup,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var (
client = fake.NewSimpleClientset()
backupper = &fakeBackupper{}
sharedInformers = informers.NewSharedInformerFactory(client, 0)
sharedInformers = informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0)
logger = logging.DefaultLogger(logrus.DebugLevel)
clockTime, _ = time.Parse("Mon Jan 2 15:04:05 2006", "Mon Jan 2 15:04:05 2006")
pluginManager = &pluginmocks.Manager{}
backupStore = &persistencemocks.BackupStore{}
)
defer backupper.AssertExpectations(t)
defer pluginManager.AssertExpectations(t)
defer backupStore.AssertExpectations(t)
c := NewBackupController(
sharedInformers.Ark().V1().Backups(),
client.ArkV1(),
backupper,
test.allowSnapshots,
logger,
logrus.InfoLevel,
func(logrus.FieldLogger) plugin.Manager { return pluginManager },
NewBackupTracker(),
sharedInformers.Ark().V1().BackupStorageLocations(),
"default",
sharedInformers.Ark().V1().VolumeSnapshotLocations(),
test.defaultLocations,
metrics.NewServerMetrics(),
).(*backupController)
c.clock = clock.NewFakeClock(clockTime)
c.newBackupStore = func(*v1.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error) {
return backupStore, nil
c := &backupController{
genericController: newGenericController("backup-test", logger),
lister: sharedInformers.Ark().V1().Backups().Lister(),
}
var expiration, startTime time.Time
if test.backup != nil {
// add directly to the informer's store so the lister can function and so we don't have to
// start the shared informers.
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup.Backup)
startTime = c.clock.Now()
if test.backup.Spec.TTL.Duration > 0 {
expiration = c.clock.Now().Add(test.backup.Spec.TTL.Duration)
}
require.NoError(t, sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup))
}
if test.expectBackup {
// set up a Backup object to represent what we expect to be passed to backupper.Backup()
backup := test.backup.DeepCopy()
backup.Spec.IncludedResources = test.expectedIncludes
backup.Spec.ExcludedResources = test.expectedExcludes
backup.Spec.IncludedNamespaces = test.backup.Spec.IncludedNamespaces
backup.Spec.SnapshotVolumes = test.backup.Spec.SnapshotVolumes
backup.Status.Phase = v1.BackupPhaseInProgress
backup.Status.Expiration.Time = expiration
backup.Status.StartTimestamp.Time = startTime
backup.Status.Version = 1
backupper.On("Backup",
mock.Anything, // logger
backup,
mock.Anything, // backup file
mock.Anything, // actions
).Return(nil)
defaultLocation := &v1.BackupStorageLocation{
ObjectMeta: metav1.ObjectMeta{
Namespace: backup.Namespace,
Name: "default",
},
Spec: v1.BackupStorageLocationSpec{
Provider: "myCloud",
StorageType: v1.StorageType{
ObjectStorage: &v1.ObjectStorageLocation{
Bucket: "bucket",
},
},
},
}
loc1 := &v1.BackupStorageLocation{
ObjectMeta: metav1.ObjectMeta{
Namespace: backup.Namespace,
Name: "loc1",
},
Spec: v1.BackupStorageLocationSpec{
Provider: "myCloud",
StorageType: v1.StorageType{
ObjectStorage: &v1.ObjectStorageLocation{
Bucket: "bucket",
},
},
},
}
require.NoError(t, sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(defaultLocation))
require.NoError(t, sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(loc1))
pluginManager.On("GetBackupItemActions").Return(nil, nil)
// Ensure we have a CompletionTimestamp when uploading.
// Failures will display the bytes in buf.
completionTimestampIsPresent := func(buf *bytes.Buffer) bool {
json := buf.String()
timeString := `"completionTimestamp": "2006-01-02T15:04:05Z"`
return strings.Contains(json, timeString)
}
backupStore.On("PutBackup", test.backup.Name, mock.MatchedBy(completionTimestampIsPresent), mock.Anything, mock.Anything).Return(nil)
pluginManager.On("CleanupClients").Return()
}
// this is necessary so the Patch() call returns the appropriate object
client.PrependReactor("patch", "backups", func(action core.Action) (bool, runtime.Object, error) {
if test.backup == nil {
return true, nil, nil
}
patch := action.(core.PatchAction).GetPatch()
patchMap := make(map[string]interface{})
if err := json.Unmarshal(patch, &patchMap); err != nil {
t.Logf("error unmarshalling patch: %s\n", err)
return false, nil, err
}
phase, err := collections.GetString(patchMap, "status.phase")
if err != nil {
t.Logf("error getting status.phase: %s\n", err)
return false, nil, err
}
res := test.backup.DeepCopy()
// these are the fields that we expect to be set by
// the controller
res.Status.Version = 1
res.Status.Expiration.Time = expiration
res.Status.Phase = v1.BackupPhase(phase)
// If there's an error, it's mostly likely that the key wasn't found
// which is fine since not all patches will have them.
completionString, err := collections.GetString(patchMap, "status.completionTimestamp")
if err == nil {
completionTime, err := time.Parse(time.RFC3339Nano, completionString)
require.NoError(t, err, "unexpected completionTimestamp parsing error %v", err)
res.Status.CompletionTimestamp.Time = completionTime
}
startString, err := collections.GetString(patchMap, "status.startTimestamp")
if err == nil {
startTime, err := time.Parse(time.RFC3339Nano, startString)
require.NoError(t, err, "unexpected startTimestamp parsing error %v", err)
res.Status.StartTimestamp.Time = startTime
}
return true, res, nil
})
// method under test
err := c.processBackup(test.key)
if test.expectError {
require.Error(t, err, "processBackup should error")
return
}
require.NoError(t, err, "processBackup unexpected error: %v", err)
if !test.expectBackup {
// the AssertExpectations calls above make sure we aren't running anything we shouldn't be
return
}
actions := client.Actions()
require.Equal(t, 2, len(actions))
// structs and func for decoding patch content
type StatusPatch struct {
Expiration time.Time `json:"expiration"`
Version int `json:"version"`
Phase v1.BackupPhase `json:"phase"`
StartTimestamp metav1.Time `json:"startTimestamp"`
CompletionTimestamp metav1.Time `json:"completionTimestamp"`
}
type SpecPatch struct {
StorageLocation string `json:"storageLocation"`
}
type ObjectMetaPatch struct {
Labels map[string]string `json:"labels"`
}
type Patch struct {
Status StatusPatch `json:"status"`
Spec SpecPatch `json:"spec,omitempty"`
ObjectMeta ObjectMetaPatch `json:"metadata,omitempty"`
}
decode := func(decoder *json.Decoder) (interface{}, error) {
actual := new(Patch)
err := decoder.Decode(actual)
return *actual, err
}
// validate Patch call 1 (setting version, expiration, phase, and storage location)
var expected Patch
if test.backup.Spec.StorageLocation == "" {
expected = Patch{
Status: StatusPatch{
Version: 1,
Phase: v1.BackupPhaseInProgress,
Expiration: expiration,
},
Spec: SpecPatch{
StorageLocation: "default",
},
ObjectMeta: ObjectMetaPatch{
Labels: map[string]string{
v1.StorageLocationLabel: "default",
},
},
}
if test.expectedErr != "" {
require.Error(t, err)
assert.Equal(t, test.expectedErr, err.Error())
} else {
expected = Patch{
Status: StatusPatch{
Version: 1,
Phase: v1.BackupPhaseInProgress,
Expiration: expiration,
},
ObjectMeta: ObjectMetaPatch{
Labels: map[string]string{
v1.StorageLocationLabel: test.backup.Spec.StorageLocation,
},
},
}
assert.Nil(t, err)
}
arktest.ValidatePatch(t, actions[0], expected, decode)
// validate Patch call 2 (setting phase, startTimestamp, completionTimestamp)
expected = Patch{
Status: StatusPatch{
Phase: v1.BackupPhaseCompleted,
StartTimestamp: metav1.Time{Time: c.clock.Now()},
CompletionTimestamp: metav1.Time{Time: c.clock.Now()},
},
}
arktest.ValidatePatch(t, actions[1], expected, decode)
// Any backup that would actually proceed to validation will cause a segfault because this
// test hasn't set up the necessary controller dependencies for validation/etc. So the lack
// of segfaults during test execution here imply that backups are not being processed, which
// is what we expect.
})
}
}
func TestDefaultAndValidateSnapshotLocations(t *testing.T) {
defaultLocationsAWS := map[string]string{"aws": "aws-us-east-2"}
defaultLocationsFake := map[string]string{"fake-provider": "some-name"}
// TestProcessBackupValidationFailures verifies that backups with invalid
// specs (overlapping include/exclude lists, nonexistent storage location)
// end up in phase FailedValidation with the expected validation errors
// recorded on their status. Only formatting and documentation are changed
// here; logic is as in the source.
func TestProcessBackupValidationFailures(t *testing.T) {
	defaultBackupLocation := arktest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation

	tests := []struct {
		name           string
		backup         *v1.Backup
		backupLocation *v1.BackupStorageLocation
		expectedErrs   []string
	}{
		{
			name:           "invalid included/excluded resources fails validation",
			backup:         arktest.NewTestBackup().WithName("backup-1").WithIncludedResources("foo").WithExcludedResources("foo").Backup,
			backupLocation: defaultBackupLocation,
			expectedErrs:   []string{"Invalid included/excluded resource lists: excludes list cannot contain an item in the includes list: foo"},
		},
		{
			name:           "invalid included/excluded namespaces fails validation",
			backup:         arktest.NewTestBackup().WithName("backup-1").WithIncludedNamespaces("foo").WithExcludedNamespaces("foo").Backup,
			backupLocation: defaultBackupLocation,
			expectedErrs:   []string{"Invalid included/excluded namespace lists: excludes list cannot contain an item in the includes list: foo"},
		},
		{
			name:         "non-existent backup location fails validation",
			backup:       arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("nonexistent").Backup,
			expectedErrs: []string{"Error getting backup storage location: backupstoragelocation.ark.heptio.com \"nonexistent\" not found"},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var (
				clientset       = fake.NewSimpleClientset(test.backup)
				sharedInformers = informers.NewSharedInformerFactory(clientset, 0)
				logger          = logging.DefaultLogger(logrus.DebugLevel)
			)

			// Only the fields needed for validation are wired up; anything
			// past validation would nil-pointer (see the note below).
			c := &backupController{
				genericController:     newGenericController("backup-test", logger),
				client:                clientset.ArkV1(),
				lister:                sharedInformers.Ark().V1().Backups().Lister(),
				backupLocationLister:  sharedInformers.Ark().V1().BackupStorageLocations().Lister(),
				defaultBackupLocation: defaultBackupLocation.Name,
			}

			require.NotNil(t, test.backup)
			require.NoError(t, sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup))

			if test.backupLocation != nil {
				_, err := clientset.ArkV1().BackupStorageLocations(test.backupLocation.Namespace).Create(test.backupLocation)
				require.NoError(t, err)

				require.NoError(t, sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(test.backupLocation))
			}

			require.NoError(t, c.processBackup(fmt.Sprintf("%s/%s", test.backup.Namespace, test.backup.Name)))

			res, err := clientset.ArkV1().Backups(test.backup.Namespace).Get(test.backup.Name, metav1.GetOptions{})
			require.NoError(t, err)

			assert.Equal(t, v1.BackupPhaseFailedValidation, res.Status.Phase)
			assert.Equal(t, test.expectedErrs, res.Status.ValidationErrors)

			// Any backup that would actually proceed to processing will cause a segfault because this
			// test hasn't set up the necessary controller dependencies for running backups. So the lack
			// of segfaults during test execution here imply that backups are not being processed, which
			// is what we expect.
		})
	}
}
// TestProcessBackupCompletions runs backups through the controller's happy
// path (validation, execution via the fake backupper, upload via the mocked
// backup store) and verifies the final Backup API object: storage-location
// defaulting, the storage-location label, phase Completed, start/completion
// timestamps, and TTL-derived expiration. Only formatting and documentation
// are changed here; logic is as in the source.
func TestProcessBackupCompletions(t *testing.T) {
	defaultBackupLocation := arktest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation

	// Parsing the reference-time layout string as itself yields a fixed
	// instant for the fake clock, so timestamps are deterministic.
	now, err := time.Parse(time.RFC1123Z, time.RFC1123Z)
	require.NoError(t, err)
	now = now.Local()

	tests := []struct {
		name           string
		backup         *v1.Backup
		backupLocation *v1.BackupStorageLocation
		expectedResult *v1.Backup
	}{
		{
			name:           "backup with no backup location gets the default",
			backup:         arktest.NewTestBackup().WithName("backup-1").Backup,
			backupLocation: defaultBackupLocation,
			expectedResult: &v1.Backup{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: v1.DefaultNamespace,
					Name:      "backup-1",
					Labels: map[string]string{
						"ark.heptio.com/storage-location": "loc-1",
					},
				},
				Spec: v1.BackupSpec{
					StorageLocation: defaultBackupLocation.Name,
				},
				Status: v1.BackupStatus{
					Phase:               v1.BackupPhaseCompleted,
					Version:             1,
					StartTimestamp:      metav1.NewTime(now),
					CompletionTimestamp: metav1.NewTime(now),
				},
			},
		},
		{
			name:           "backup with a specific backup location keeps it",
			backup:         arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("alt-loc").Backup,
			backupLocation: arktest.NewTestBackupStorageLocation().WithName("alt-loc").BackupStorageLocation,
			expectedResult: &v1.Backup{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: v1.DefaultNamespace,
					Name:      "backup-1",
					Labels: map[string]string{
						"ark.heptio.com/storage-location": "alt-loc",
					},
				},
				Spec: v1.BackupSpec{
					StorageLocation: "alt-loc",
				},
				Status: v1.BackupStatus{
					Phase:               v1.BackupPhaseCompleted,
					Version:             1,
					StartTimestamp:      metav1.NewTime(now),
					CompletionTimestamp: metav1.NewTime(now),
				},
			},
		},
		{
			name:           "backup with a TTL has expiration set",
			backup:         arktest.NewTestBackup().WithName("backup-1").WithTTL(10 * time.Minute).Backup,
			backupLocation: defaultBackupLocation,
			expectedResult: &v1.Backup{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: v1.DefaultNamespace,
					Name:      "backup-1",
					Labels: map[string]string{
						"ark.heptio.com/storage-location": "loc-1",
					},
				},
				Spec: v1.BackupSpec{
					TTL:             metav1.Duration{Duration: 10 * time.Minute},
					StorageLocation: defaultBackupLocation.Name,
				},
				Status: v1.BackupStatus{
					Phase:               v1.BackupPhaseCompleted,
					Version:             1,
					Expiration:          metav1.NewTime(now.Add(10 * time.Minute)),
					StartTimestamp:      metav1.NewTime(now),
					CompletionTimestamp: metav1.NewTime(now),
				},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			var (
				clientset       = fake.NewSimpleClientset(test.backup)
				sharedInformers = informers.NewSharedInformerFactory(clientset, 0)
				logger          = logging.DefaultLogger(logrus.DebugLevel)
				pluginManager   = new(pluginmocks.Manager)
				backupStore     = new(persistencemocks.BackupStore)
				backupper       = new(fakeBackupper)
			)

			c := &backupController{
				genericController:     newGenericController("backup-test", logger),
				client:                clientset.ArkV1(),
				lister:                sharedInformers.Ark().V1().Backups().Lister(),
				backupLocationLister:  sharedInformers.Ark().V1().BackupStorageLocations().Lister(),
				defaultBackupLocation: defaultBackupLocation.Name,
				backupTracker:         NewBackupTracker(),
				metrics:               metrics.NewServerMetrics(),
				clock:                 clock.NewFakeClock(now),
				newPluginManager:      func(logrus.FieldLogger) plugin.Manager { return pluginManager },
				newBackupStore: func(*v1.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error) {
					return backupStore, nil
				},
				backupper: backupper,
			}

			pluginManager.On("GetBackupItemActions").Return(nil, nil)
			pluginManager.On("CleanupClients").Return(nil)

			backupper.On("Backup", mock.Anything, mock.Anything, mock.Anything, []pkgbackup.ItemAction(nil), pluginManager).Return(nil)

			// Ensure we have a CompletionTimestamp when uploading.
			// Failures will display the bytes in buf.
			completionTimestampIsPresent := func(buf *bytes.Buffer) bool {
				return strings.Contains(buf.String(), `"completionTimestamp": "2006-01-02T22:04:05Z"`)
			}
			backupStore.On("PutBackup", test.backup.Name, mock.MatchedBy(completionTimestampIsPresent), mock.Anything, mock.Anything).Return(nil)

			// add the test's backup to the informer/lister store
			require.NotNil(t, test.backup)
			require.NoError(t, sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup))

			// add the default backup storage location to the clientset and the informer/lister store
			_, err := clientset.ArkV1().BackupStorageLocations(defaultBackupLocation.Namespace).Create(defaultBackupLocation)
			require.NoError(t, err)

			require.NoError(t, sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(defaultBackupLocation))

			// add the test's backup storage location to the clientset and the informer/lister store
			// if it's different than the default
			if test.backupLocation != nil && test.backupLocation != defaultBackupLocation {
				_, err := clientset.ArkV1().BackupStorageLocations(test.backupLocation.Namespace).Create(test.backupLocation)
				require.NoError(t, err)

				require.NoError(t, sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(test.backupLocation))
			}

			require.NoError(t, c.processBackup(fmt.Sprintf("%s/%s", test.backup.Namespace, test.backup.Name)))

			res, err := clientset.ArkV1().Backups(test.backup.Namespace).Get(test.backup.Name, metav1.GetOptions{})
			require.NoError(t, err)

			assert.Equal(t, test.expectedResult, res)
		})
	}
}
func TestValidateAndGetSnapshotLocations(t *testing.T) {
defaultLocationsAWS := map[string]*v1.VolumeSnapshotLocation{
"aws": arktest.NewTestVolumeSnapshotLocation().WithName("aws-us-east-2").VolumeSnapshotLocation,
}
defaultLocationsFake := map[string]*v1.VolumeSnapshotLocation{
"fake-provider": arktest.NewTestVolumeSnapshotLocation().WithName("some-name").VolumeSnapshotLocation,
}
multipleLocationNames := []string{"aws-us-west-1", "aws-us-east-1"}
@@ -463,7 +391,7 @@ func TestDefaultAndValidateSnapshotLocations(t *testing.T) {
name string
backup *arktest.TestBackup
locations []*arktest.TestVolumeSnapshotLocation
defaultLocations map[string]string
defaultLocations map[string]*v1.VolumeSnapshotLocation
expectedVolumeSnapshotLocationNames []string // adding these in the expected order will allow to test with better msgs in case of a test failure
expectedErrors string
expectedSuccess bool
@@ -493,7 +421,7 @@ func TestDefaultAndValidateSnapshotLocations(t *testing.T) {
name: "no location name for the provider exists: the provider's default should be added",
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew),
defaultLocations: defaultLocationsAWS,
expectedVolumeSnapshotLocationNames: []string{defaultLocationsAWS["aws"]},
expectedVolumeSnapshotLocationNames: []string{defaultLocationsAWS["aws"].Name},
expectedSuccess: true,
},
{
@@ -506,7 +434,7 @@ func TestDefaultAndValidateSnapshotLocations(t *testing.T) {
backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithVolumeSnapshotLocations(dupLocationNames),
locations: arktest.NewTestVolumeSnapshotLocation().WithName(dupLocationNames[0]).WithProviderConfig(dupLocationList),
defaultLocations: defaultLocationsFake,
expectedVolumeSnapshotLocationNames: []string{dupLocationNames[0], defaultLocationsFake["fake-provider"]},
expectedVolumeSnapshotLocationNames: []string{dupLocationNames[0], defaultLocationsFake["fake-provider"].Name},
expectedSuccess: true,
},
}
@@ -519,7 +447,8 @@ func TestDefaultAndValidateSnapshotLocations(t *testing.T) {
)
c := &backupController{
snapshotLocationLister: sharedInformers.Ark().V1().VolumeSnapshotLocations().Lister(),
snapshotLocationLister: sharedInformers.Ark().V1().VolumeSnapshotLocations().Lister(),
defaultSnapshotLocations: test.defaultLocations,
}
// set up a Backup object to represent what we expect to be passed to backupper.Backup()
@@ -529,15 +458,23 @@ func TestDefaultAndValidateSnapshotLocations(t *testing.T) {
require.NoError(t, sharedInformers.Ark().V1().VolumeSnapshotLocations().Informer().GetStore().Add(location.VolumeSnapshotLocation))
}
errs := c.defaultAndValidateSnapshotLocations(backup, test.defaultLocations)
providerLocations, errs := c.validateAndGetSnapshotLocations(backup)
if test.expectedSuccess {
for _, err := range errs {
require.NoError(t, errors.New(err), "defaultAndValidateSnapshotLocations unexpected error: %v", err)
require.NoError(t, errors.New(err), "validateAndGetSnapshotLocations unexpected error: %v", err)
}
require.Equal(t, test.expectedVolumeSnapshotLocationNames, backup.Spec.VolumeSnapshotLocations)
var locations []string
for _, loc := range providerLocations {
locations = append(locations, loc.Name)
}
sort.Strings(test.expectedVolumeSnapshotLocationNames)
sort.Strings(locations)
require.Equal(t, test.expectedVolumeSnapshotLocationNames, locations)
} else {
if len(errs) == 0 {
require.Error(t, nil, "defaultAndValidateSnapshotLocations expected error")
require.Error(t, nil, "validateAndGetSnapshotLocations expected error")
}
require.Contains(t, errs, test.expectedErrors)
}