Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-11 15:30:34 +00:00)

Compare commits: v1.4.1...release-1.4 (2 commits: 4fc1c70028, 56a08a4d69)
.github/workflows/pr.yml (vendored): 2 changes

@@ -1,5 +1,5 @@
 name: Pull Request CI Check
-on: [push, pull_request]
+on: [pull_request]
 jobs:
 
   build:
.github/workflows/push.yml (vendored, new file): 34 additions

@@ -0,0 +1,34 @@
+name: Master CI
+
+on:
+  push:
+    branches: [ master ]
+    tags:
+      - '*'
+
+jobs:
+
+  build:
+    name: Build
+    runs-on: ubuntu-latest
+    steps:
+
+      - name: Set up Go 1.14
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.14
+        id: go
+
+      - name: Check out code into the Go module directory
+        uses: actions/checkout@v2
+
+      - name: Build
+        run: make local
+
+      - name: Test
+        run: make test
+
+      - name: Publish container image
+        run: |
+          docker login -u ${{ secrets.DOCKER_USER }} -p ${{ secrets.DOCKER_PASSWORD }}
+          ./hack/docker-push.sh
.travis.yml: 24 deletions

@@ -1,24 +0,0 @@
-dist: xenial
-language: go
-go:
-  - 1.14.x
-sudo: required
-services:
-  - docker
-script:
-  - export BRANCH=$(if [ "$TRAVIS_PULL_REQUEST" == "false" ]; then echo $TRAVIS_BRANCH;
-    else echo $TRAVIS_PULL_REQUEST_BRANCH; fi)
-  - echo "TRAVIS_BRANCH=$TRAVIS_BRANCH, PR=$TRAVIS_PULL_REQUEST, BRANCH=$BRANCH"
-  - hack/ci-check.sh
-deploy:
-  - provider: script
-    skip_cleanup: true
-    script: hack/docker-push.sh
-    on:
-      repo: vmware-tanzu/velero
-      all_branches: true
-env:
-  global:
-    # generated using `travis encrypt`: https://docs.travis-ci.com/user/environment-variables#encrypting-environment-variables
-    - secure: Sa0R7bhOvrybIWvURjEpOLQ3/v1IqiSM2uwYTSMhLtwHunkiYXpbXi/wVPkujM7kgnFeJzGfNTZI6blkl3Vnz6Ca48avxiv+KRk7bNlTn/e2LkJaqOh/NcwqrVyWG8cZpWTHAzwJkHrV6xcWRTWFqx/UhUHH4PdmXd0pj3/DDxHhUZXJkZfYDC2uI+kmhJd9x1yxxz6OTXrGeMw22R5RtgltcQYjYWjGhXjNJ96+0QhC6juCwwtuU3oz7vfV1dP6ea9yeiHKCdDR0xp/Ymtlt4DULgQv4vuVBCR7LuPjsdLI1QslKbCz+vk/XwEcckf3R+yJUaArGLSM0f3AwuAZnETwkfKu9gJCEz8Xvvwsy5QHj/DGeSr+JY4Ylb1YzJG48R5QMcNwOgQOpewamBggxkyyOlNanUGzMJ8F7zjDOZjOUsRSSG+cn01/oJEHNyFmNkxbugOGuxqpi8qIfyWrdPSb8AdnUVUi2GDR/unSm2uZLSJtjaeypqzxalFdL04pbkk6/vKZTi6VMIyewHeR0XIC/VKQ+av2CwjnfqTtTHQHMh2kVxVzy4SKPYgWvbUUg9SG0z2lmfBWzE1HS+ozUKruxBu/YnPHaFz3D/+w0AwtHSYp3FMkCC/2SZU1hiOzAKX+S8T5K1OKoBiIEEUVa90EnSslCIiNIUHXgdtG62U=
-    - secure: MAHgHvxxPJKxcECwdjaTa/ZW2wlhfcpcprXZqPJKQVFS6S5Wc+xf0CBRggnZT/aEs52EpD2bZy505a4uy9EcTDeq7q5oaXbOBFdyKgB1leWiycup60p6BmVqre5//EzXe+CJpkSu46ajcVORJIEGjs0tS5p0HV70sqpsfd13L2pXpmZ54RIj+TOm7Tz6VVeSNUmQ7WymIrvKg4cQ50w+aS7ak+gy8b6oYKWX01oZPQZA3W1pvYcBgBKobiQN8dh7WvT6QmTu/38WykTjt+9CfEJOboitgEESMjf06ueaLrhQoKQafH3XzfkfEW7PWWGl1RZYMEf15Joj1edIW1i9eYZ1T3fvBaIwvgc7lLgNdzc5ZxG8eEtoDSfB0+gDJOPBLCo+5G3xvRCie7OkKvXdijkCKmc+FRMJlayHkEw75cgId9svDEJghtt2VlWAM4ba4S79Og+cyI0VROkzS0FEjabz74tU9haVaypVnLmzEC/BhilzCt/jhRuYInpsMBeasGQ6Rg8gjos4AQamdaXJRI520o7zFWXVYqctfPr+mrVOYCfUISnSS3uqb4UlvLBqXDPVWBBIgmPrSgvHXxbjsajFuyWTULm60nj/JUuh7BC9HF9pil3g4/2E9cpLZCFQxVYu6+30eEv4dCO7Ptq3XIINDWbbS6Pmw62/ZsE8ABU=
@@ -33,8 +33,8 @@ If you are ready to jump in and test, add code, or help with documentation, foll
 
 See [the list of releases][6] to find out about feature changes.
 
-[1]: https://travis-ci.org/vmware-tanzu/velero.svg?branch=master
-[2]: https://travis-ci.org/vmware-tanzu/velero
+[1]: https://github.com/vmware-tanzu/velero/workflows/Master%20CI/badge.svg
+[2]: https://github.com/vmware-tanzu/velero/actions?query=workflow%3A"Master+CI"
 
 [4]: https://github.com/vmware-tanzu/velero/issues
 [6]: https://github.com/vmware-tanzu/velero/releases
 [9]: https://kubernetes.io/docs/setup/
@@ -1,11 +1,11 @@
-## v1.4.1
-### 2020-07-13
+## v1.4.3
+### 2020-10-20
 
 ### Download
-https://github.com/vmware-tanzu/velero/releases/tag/v1.4.1
+https://github.com/vmware-tanzu/velero/releases/tag/v1.4.3
 
 ### Container Image
-`velero/velero:v1.4.1`
+`velero/velero:v1.4.3`
 
 ### Documentation
 https://velero.io/docs/v1.4/

@@ -14,9 +14,35 @@ https://velero.io/docs/v1.4/
 https://velero.io/docs/v1.4/upgrade-to-1.4/
 
 ### All Changes
+  * Restore CRD Resource name to fix CRD wait functionality. (#2949, @sseago)
+  * rename the PV if VolumeSnapshotter has modified the PV name (#2835, @pawanpraka1)
+  * Ensure that bound PVCs and PVs remain bound on restore. (#3007, @nrb)
+
+## v1.4.2
+### 2020-07-13
+
+### Download
+https://github.com/vmware-tanzu/velero/releases/tag/v1.4.2
+
+### Container Image
+`velero/velero:v1.4.2`
+
+### Documentation
+https://velero.io/docs/v1.4/
+
+### Upgrading
+https://velero.io/docs/v1.4/upgrade-to-1.4/
+
+### All Changes
   * log a warning instead of erroring if an additional item returned from a plugin can't be found in the Kubernetes API (#2595, @skriss)
   * Adjust restic default time out to 4 hours and base pod resource requests to 500m CPU/512Mi memory. (#2696, @nrb)
   * capture version of the CRD prior before invoking the remap_crd_version backup item action (#2683, @ashish-amarnath)
+
+
+## v1.4.1
+
+This tag was created in code, but has no associated docker image due to misconfigured building infrastructure. v1.4.2 fixes this.
 
 ## v1.4.0
 ### 2020-05-26

@@ -1 +0,0 @@
-capture version of the CRD prior before invoking the remap_crd_version backup item action

@@ -1 +0,0 @@
-Adjust restic default time out to 4 hours and base pod resource requests to 500m CPU/512Mi memory.
@@ -20,8 +20,8 @@
 
 set +x
 
-if [[ -z "$TRAVIS" ]]; then
-  echo "This script is intended to be run only on Travis." >&2
+if [[ -z "$CI" ]]; then
+  echo "This script is intended to be run only on Github Actions." >&2
   exit 1
 fi
 
@@ -47,37 +47,43 @@ function highest_release() {
   done
 }
 
+triggeredBy=$(echo $GITHUB_REF | cut -d / -f 2)
+if [[ "$triggeredBy" == "heads" ]]; then
+  BRANCH=$(echo $GITHUB_REF | cut -d / -f 3)
+  TAG=
+elif [[ "$triggeredBy" == "tags" ]]; then
+  BRANCH=
+  TAG=$(echo $GITHUB_REF | cut -d / -f 3)
+fi
+
 if [[ "$BRANCH" == "master" ]]; then
   VERSION="$BRANCH"
-elif [[ ! -z "$TRAVIS_TAG" ]]; then
-  # Tags aren't fetched by Travis on checkout, and we don't need them for master
+elif [[ ! -z "$TAG" ]]; then
+  # Explicitly checkout tags when building from a git tag.
+  # This is not needed when building from master
   git fetch --tags
   # Calculate the latest release if there's a tag.
   highest_release
-  VERSION="$TRAVIS_TAG"
+  VERSION="$TAG"
 else
   # If we're not on master and we're not building a tag, exit early.
   echo "We're not on master and we're not building a tag, exit early."
   exit 0
 fi
 
 
 # Assume we're not tagging `latest` by default, and never on master.
 TAG_LATEST=false
 if [[ "$BRANCH" == "master" ]]; then
   echo "Building master, not tagging latest."
-elif [[ "$TRAVIS_TAG" == "$HIGHEST" ]]; then
+elif [[ "$TAG" == "$HIGHEST" ]]; then
   TAG_LATEST=true
 fi
 
 # Debugging info
 echo "Highest tag found: $HIGHEST"
 echo "BRANCH: $BRANCH"
-echo "TRAVIS_TAG: $TRAVIS_TAG"
+echo "TAG: $TAG"
 echo "TAG_LATEST: $TAG_LATEST"
 
 echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
 unset GIT_HTTP_USER_AGENT
 
 echo "Building and pushing container images."
 
 VERSION="$VERSION" TAG_LATEST="$TAG_LATEST" make all-containers all-push all-manifests
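For reference, the new branch/tag detection replaces Travis's `$TRAVIS_BRANCH` and `$TRAVIS_TAG` with parsing of `GITHUB_REF`. Below is a minimal, standalone Go sketch of the same idea; the `parseGitHubRef` helper is hypothetical and only for illustration (the script itself does this with `cut`).

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// parseGitHubRef mirrors the shell logic above for simple refs:
// GITHUB_REF is "refs/heads/<branch>" for branch pushes and
// "refs/tags/<tag>" for tag pushes.
func parseGitHubRef(ref string) (branch, tag string) {
	parts := strings.SplitN(ref, "/", 3)
	if len(parts) != 3 {
		return "", ""
	}
	switch parts[1] {
	case "heads":
		branch = parts[2]
	case "tags":
		tag = parts[2]
	}
	return branch, tag
}

func main() {
	// e.g. GITHUB_REF=refs/tags/v1.4.3 yields BRANCH="" TAG="v1.4.3".
	branch, tag := parseGitHubRef(os.Getenv("GITHUB_REF"))
	fmt.Printf("BRANCH=%q TAG=%q\n", branch, tag)
}
```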
@@ -1723,7 +1723,7 @@ func TestBackupActionAdditionalItems(t *testing.T) {
 			},
 		},
 		{
-			name:   "if there's an error backing up additional items, the item the action was run for isn't backed up",
+			name:   "if additional items aren't found in the API, they're skipped and the original item is still backed up",
 			backup: defaultBackup().Result(),
 			apiResources: []*test.APIResource{
 				test.Pods(

@@ -1746,8 +1746,10 @@ func TestBackupActionAdditionalItems(t *testing.T) {
 				},
 			},
 			want: []string{
+				"resources/pods/namespaces/ns-1/pod-1.json",
 				"resources/pods/namespaces/ns-2/pod-2.json",
 				"resources/pods/namespaces/ns-3/pod-3.json",
+				"resources/pods/v1-preferredversion/namespaces/ns-1/pod-1.json",
 				"resources/pods/v1-preferredversion/namespaces/ns-2/pod-2.json",
 				"resources/pods/v1-preferredversion/namespaces/ns-3/pod-3.json",
 			},
@@ -26,6 +26,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	corev1api "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"

@@ -341,12 +342,20 @@ func (ib *itemBackupper) executeActions(
 				return nil, err
 			}
 
-			additionalItem, err := client.Get(additionalItem.Name, metav1.GetOptions{})
+			item, err := client.Get(additionalItem.Name, metav1.GetOptions{})
+			if apierrors.IsNotFound(err) {
+				log.WithFields(logrus.Fields{
+					"groupResource": additionalItem.GroupResource,
+					"namespace":     additionalItem.Namespace,
+					"name":          additionalItem.Name,
+				}).Warnf("Additional item was not found in Kubernetes API, can't back it up")
+				continue
+			}
 			if err != nil {
 				return nil, errors.WithStack(err)
 			}
 
-			if _, err = ib.backupItem(log, additionalItem, gvr.GroupResource(), gvr); err != nil {
+			if _, err = ib.backupItem(log, item, gvr.GroupResource(), gvr); err != nil {
 				return nil, err
 			}
 		}
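The change above swaps a hard failure for a warn-and-skip when an additional item returned by a backup item action no longer exists in the API. A small self-contained sketch of that pattern, assuming only the apimachinery error helpers (`getItem` and the item names are made up for the example):

```go
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// getItem stands in for the client.Get call in the hunk above: it returns a
// NotFound error for anything that isn't in the fake "cluster".
func getItem(name string) (string, error) {
	cluster := map[string]bool{"pod-1": true, "pod-2": true}
	if !cluster[name] {
		gr := schema.GroupResource{Group: "", Resource: "pods"}
		return "", apierrors.NewNotFound(gr, name)
	}
	return name, nil
}

func main() {
	additionalItems := []string{"pod-1", "pod-2", "pod-3"}
	for _, name := range additionalItems {
		item, err := getItem(name)
		if apierrors.IsNotFound(err) {
			// Like the new code path: warn and move on instead of failing the backup.
			fmt.Printf("additional item %s not found, skipping\n", name)
			continue
		}
		if err != nil {
			panic(err) // any other error is still fatal
		}
		fmt.Printf("backing up %s\n", item)
	}
}
```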
@@ -30,7 +30,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 
 	v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
-	"github.com/vmware-tanzu/velero/pkg/kuberesource"
 	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
 )

@@ -110,7 +109,7 @@ func fetchV1beta1CRD(name string, betaCRDClient apiextv1beta1client.CustomResour
 	// See https://github.com/kubernetes/kubernetes/issues/3030. Unsure why this is happening here and not in main Velero;
 	// probably has to do with List calls and Dynamic client vs typed client
 	// Set these all the time, since they shouldn't ever be different, anyway
-	betaCRD.Kind = kuberesource.CustomResourceDefinitions.Resource
+	betaCRD.Kind = "CustomResourceDefinition"
 	betaCRD.APIVersion = apiextv1beta1.SchemeGroupVersion.String()
 
 	m, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&betaCRD)
@@ -70,11 +70,9 @@ func DescribeBackup(
 		}
 	}
 
-	if status.Phase == velerov1api.BackupPhasePartiallyFailed {
-		d.Println()
-		d.Printf("Errors:\t%d\n", status.Errors)
-		d.Printf("Warnings:\t%d\n", status.Warnings)
-	}
+	d.Println()
+	d.Printf("Errors:\t%d\n", status.Errors)
+	d.Printf("Warnings:\t%d\n", status.Warnings)
 
 	d.Println()
 	DescribeBackupSpec(d, backup.Spec)
@@ -35,6 +35,8 @@ var (
 		// https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204
 		{Name: "Name", Type: "string", Format: "name"},
 		{Name: "Status"},
+		{Name: "Errors"},
+		{Name: "Warnings"},
 		{Name: "Created"},
 		{Name: "Expires"},
 		{Name: "Storage Location"},

@@ -58,7 +60,6 @@ func printBackupList(list *velerov1api.BackupList) []metav1.TableRow {
 var timestampSuffix = regexp.MustCompile("-[0-9]{14}$")
 
 func sortBackupsByPrefixAndTimestamp(list *velerov1api.BackupList) {
-
 	sort.Slice(list.Items, func(i, j int) bool {
 		iSuffixIndex := timestampSuffix.FindStringIndex(list.Items[i].Name)
 		jSuffixIndex := timestampSuffix.FindStringIndex(list.Items[j].Name)

@@ -98,18 +99,17 @@ func printBackup(backup *velerov1api.Backup) []metav1.TableRow {
 	if backup.DeletionTimestamp != nil && !backup.DeletionTimestamp.Time.IsZero() {
 		status = "Deleting"
 	}
-	if status == string(velerov1api.BackupPhasePartiallyFailed) {
-		if backup.Status.Errors == 1 {
-			status = fmt.Sprintf("%s (1 error)", status)
-		} else {
-			status = fmt.Sprintf("%s (%d errors)", status, backup.Status.Errors)
-		}
-	}
 
-	location := backup.Spec.StorageLocation
-
-	row.Cells = append(row.Cells, backup.Name, status, backup.Status.StartTimestamp, humanReadableTimeFromNow(expiration), location, metav1.FormatLabelSelector(backup.Spec.LabelSelector))
+	row.Cells = append(row.Cells,
+		backup.Name,
+		status,
+		backup.Status.Errors,
+		backup.Status.Warnings,
+		backup.Status.StartTimestamp,
+		humanReadableTimeFromNow(expiration),
+		backup.Spec.StorageLocation,
+		metav1.FormatLabelSelector(backup.Spec.LabelSelector),
+	)
 
 	return []metav1.TableRow{row}
 }
@@ -30,8 +30,8 @@ var (
 		{Name: "Name", Type: "string", Format: "name"},
 		{Name: "Backup"},
 		{Name: "Status"},
-		{Name: "Warnings"},
 		{Name: "Errors"},
+		{Name: "Warnings"},
 		{Name: "Created"},
 		{Name: "Selector"},
 	}

@@ -60,8 +60,8 @@ func printRestore(restore *v1.Restore) []metav1.TableRow {
 		restore.Name,
 		restore.Spec.BackupName,
 		status,
-		restore.Status.Warnings,
 		restore.Status.Errors,
+		restore.Status.Warnings,
 		restore.CreationTimestamp.Time,
 		metav1.FormatLabelSelector(restore.Spec.LabelSelector),
 	)
@@ -23,7 +23,7 @@ import (
 var (
 	ClusterRoleBindings       = schema.GroupResource{Group: "rbac.authorization.k8s.io", Resource: "clusterrolebindings"}
 	ClusterRoles              = schema.GroupResource{Group: "rbac.authorization.k8s.io", Resource: "clusterroles"}
-	CustomResourceDefinitions = schema.GroupResource{Group: "apiextensions.k8s.io", Resource: "CustomResourceDefinition"}
+	CustomResourceDefinitions = schema.GroupResource{Group: "apiextensions.k8s.io", Resource: "customresourcedefinitions"}
 	Jobs                      = schema.GroupResource{Group: "batch", Resource: "jobs"}
 	Namespaces                = schema.GroupResource{Group: "", Resource: "namespaces"}
 	PersistentVolumeClaims    = schema.GroupResource{Group: "", Resource: "persistentvolumeclaims"}
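The corrected value follows the Kubernetes convention that `GroupResource.Resource` holds the lowercase plural REST name rather than the kind, which is what API lookups expect. A small sketch of how such a value is typically used (hypothetical example, not repository code):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Resource names in a GroupResource are lowercase plural REST names,
	// matching what the API server and dynamic clients expect.
	crds := schema.GroupResource{Group: "apiextensions.k8s.io", Resource: "customresourcedefinitions"}

	// WithVersion produces the GroupVersionResource used for lookups.
	gvr := crds.WithVersion("v1beta1")

	fmt.Println(crds.String()) // customresourcedefinitions.apiextensions.k8s.io
	fmt.Println(gvr.String())  // apiextensions.k8s.io/v1beta1, Resource=customresourcedefinitions
}
```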
@@ -47,20 +47,6 @@ func (r *pvRestorer) executePVAction(obj *unstructured.Unstructured) (*unstructu
 		return nil, errors.New("PersistentVolume is missing its name")
 	}
 
-	// It's simpler to just access the spec through the unstructured object than to convert
-	// to structured and back here, especially since the SetVolumeID(...) call below needs
-	// the unstructured representation (and does a conversion internally).
-	res, ok := obj.Object["spec"]
-	if !ok {
-		return nil, errors.New("spec not found")
-	}
-	spec, ok := res.(map[string]interface{})
-	if !ok {
-		return nil, errors.Errorf("spec was of type %T, expected map[string]interface{}", res)
-	}
-
-	delete(spec, "claimRef")
-
 	if boolptr.IsSetToFalse(r.snapshotVolumes) {
 		// The backup had snapshots disabled, so we can return early
 		return obj, nil
@@ -56,19 +56,6 @@ func TestExecutePVAction_NoSnapshotRestores(t *testing.T) {
 			restore:     builder.ForRestore(api.DefaultNamespace, "").Result(),
 			expectedErr: true,
 		},
-		{
-			name:        "no spec should error",
-			obj:         NewTestUnstructured().WithName("pv-1").Unstructured,
-			restore:     builder.ForRestore(api.DefaultNamespace, "").Result(),
-			expectedErr: true,
-		},
-		{
-			name:        "ensure spec.claimRef is deleted",
-			obj:         NewTestUnstructured().WithName("pv-1").WithAnnotations("a", "b").WithSpec("claimRef", "someOtherField").Unstructured,
-			restore:     builder.ForRestore(api.DefaultNamespace, "").RestorePVs(false).Result(),
-			backup:      defaultBackup().Phase(api.BackupPhaseInProgress).Result(),
-			expectedRes: NewTestUnstructured().WithAnnotations("a", "b").WithName("pv-1").WithSpec("someOtherField").Unstructured,
-		},
 		{
 			name: "ensure spec.storageClassName is retained",
 			obj:  NewTestUnstructured().WithName("pv-1").WithAnnotations("a", "b").WithSpec("storageClassName", "someOtherField").Unstructured,

@@ -81,7 +68,7 @@ func TestExecutePVAction_NoSnapshotRestores(t *testing.T) {
 			obj:         NewTestUnstructured().WithName("pv-1").WithAnnotations("a", "b").WithSpec("claimRef", "storageClassName", "someOtherField").Unstructured,
 			restore:     builder.ForRestore(api.DefaultNamespace, "").RestorePVs(true).Result(),
 			backup:      defaultBackup().Phase(api.BackupPhaseInProgress).SnapshotVolumes(false).Result(),
-			expectedRes: NewTestUnstructured().WithName("pv-1").WithAnnotations("a", "b").WithSpec("storageClassName", "someOtherField").Unstructured,
+			expectedRes: NewTestUnstructured().WithName("pv-1").WithAnnotations("a", "b").WithSpec("claimRef", "storageClassName", "someOtherField").Unstructured,
 		},
 		{
 			name: "restore.spec.restorePVs=false, return early",
@@ -60,6 +60,14 @@ import (
 	"github.com/vmware-tanzu/velero/pkg/volume"
 )
 
+// These annotations are taken from the Kubernetes persistent volume/persistent volume claim controller.
+// They cannot be directly importing because they are part of the kubernetes/kubernetes package, and importing that package is unsupported.
+// Their values are well-known and slow changing. They're duplicated here as constants to provide compile-time checking.
+// Originals can be found in kubernetes/kubernetes/pkg/controller/volume/persistentvolume/util/util.go.
+const KubeAnnBindCompleted = "pv.kubernetes.io/bind-completed"
+const KubeAnnBoundByController = "pv.kubernetes.io/bound-by-controller"
+const KubeAnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by"
+
 type VolumeSnapshotterGetter interface {
 	GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error)
 }
@@ -868,6 +876,13 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
 				return warnings, errs
 			}
 
+			// Check to see if the claimRef.namespace field needs to be remapped, and do so if necessary.
+			_, err = remapClaimRefNS(ctx, obj)
+			if err != nil {
+				errs.Add(namespace, err)
+				return warnings, errs
+			}
+
 			var shouldRestoreSnapshot bool
 			if !shouldRenamePV {
 				// Check if the PV exists in the cluster before attempting to create
@@ -885,6 +900,9 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
 			}
 
 			if shouldRestoreSnapshot {
+				// reset the PV's binding status so that Kubernetes can properly associate it with the restored PVC.
+				obj = resetVolumeBindingInfo(obj)
+
 				// even if we're renaming the PV, obj still has the old name here, because the pvRestorer
 				// uses the original name to look up metadata about the snapshot.
 				ctx.log.Infof("Restoring persistent volume from snapshot.")
@@ -894,6 +912,11 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
 					return warnings, errs
 				}
 				obj = updatedObj
+
+				// VolumeSnapshotter has modified the PV name, we should rename the PV
+				if oldName != obj.GetName() {
+					shouldRenamePV = true
+				}
 			}
 
 			if shouldRenamePV {
@@ -939,8 +962,9 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
 			default:
 				ctx.log.Infof("Restoring persistent volume as-is because it doesn't have a snapshot and its reclaim policy is not Delete.")
 
-				// we call the pvRestorer here to clear out the PV's claimRef, so it can be re-claimed
-				// when its PVC is restored.
+				obj = resetVolumeBindingInfo(obj)
+				// we call the pvRestorer here to clear out the PV's claimRef.UID, so it can be re-claimed
+				// when its PVC is restored and gets a new UID.
 				updatedObj, err := ctx.pvRestorer.executePVAction(obj)
 				if err != nil {
 					errs.Add(namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err))
@@ -1033,17 +1057,16 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc
 			return warnings, errs
 		}
 
-		if pvc.Spec.VolumeName != "" && ctx.pvsToProvision.Has(pvc.Spec.VolumeName) {
-			ctx.log.Infof("Resetting PersistentVolumeClaim %s/%s for dynamic provisioning", namespace, name)
+		if pvc.Spec.VolumeName != "" {
+			// This used to only happen with restic volumes, but now always remove this binding metadata
+			obj = resetVolumeBindingInfo(obj)
 
-			// use the unstructured helpers here since we're only deleting and
-			// the unstructured converter will add back (empty) fields for metadata
-			// and status that we removed earlier.
-			unstructured.RemoveNestedField(obj.Object, "spec", "volumeName")
-			annotations := obj.GetAnnotations()
-			delete(annotations, "pv.kubernetes.io/bind-completed")
-			delete(annotations, "pv.kubernetes.io/bound-by-controller")
-			obj.SetAnnotations(annotations)
+			// This is the case for restic volumes, where we need to actually have an empty volume created instead of restoring one.
+			// The assumption is that any PV in pvsToProvision doesn't have an associated snapshot.
+			if ctx.pvsToProvision.Has(pvc.Spec.VolumeName) {
+				ctx.log.Infof("Resetting PersistentVolumeClaim %s/%s for dynamic provisioning", namespace, name)
+				unstructured.RemoveNestedField(obj.Object, "spec", "volumeName")
+			}
 		}
 
 		if newName, ok := ctx.renamedPVs[pvc.Spec.VolumeName]; ok {
@@ -1192,6 +1215,40 @@ func shouldRenamePV(ctx *context, obj *unstructured.Unstructured, client client.
 	return true, nil
 }
 
+// remapClaimRefNS remaps a PersistentVolume's claimRef.Namespace based on a restore's NamespaceMappings, if necessary.
+// Returns true if the namespace was remapped, false if it was not required.
+func remapClaimRefNS(ctx *context, obj *unstructured.Unstructured) (bool, error) {
+	if len(ctx.restore.Spec.NamespaceMapping) == 0 {
+		ctx.log.Debug("Persistent volume does not need to have the claimRef.namespace remapped because restore is not remapping any namespaces")
+		return false, nil
+	}
+
+	// Conversion to the real type here is more readable than all the error checking involved with reading each field individually.
+	pv := new(v1.PersistentVolume)
+	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, pv); err != nil {
+		return false, errors.Wrapf(err, "error converting persistent volume to structured")
+	}
+
+	if pv.Spec.ClaimRef == nil {
+		ctx.log.Debugf("Persistent volume does not need to have the claimRef.namepace remapped because it's not claimed")
+		return false, nil
+	}
+
+	targetNS, ok := ctx.restore.Spec.NamespaceMapping[pv.Spec.ClaimRef.Namespace]
+
+	if !ok {
+		ctx.log.Debugf("Persistent volume does not need to have the claimRef.namespace remapped because it's not claimed by a PVC in a namespace that's being remapped")
+		return false, nil
+	}
+
+	err := unstructured.SetNestedField(obj.Object, targetNS, "spec", "claimRef", "namespace")
+	if err != nil {
+		return false, err
+	}
+	ctx.log.Debug("Persistent volume's namespace was updated")
+	return true, nil
+}
+
 // restorePodVolumeBackups restores the PodVolumeBackups for the given restored pod
 func restorePodVolumeBackups(ctx *context, createdObj *unstructured.Unstructured, originalNamespace string) {
 	if ctx.resticRestorer == nil {
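remapClaimRefNS, added above, rewrites a PV's `spec.claimRef.namespace` when the restore remaps namespaces. A stripped-down sketch of that remapping using the unstructured helpers (the object and mapping below are invented for illustration and are not the repository's code):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// A cut-down PersistentVolume whose claimRef still points at the source namespace.
	pv := &unstructured.Unstructured{Object: map[string]interface{}{
		"spec": map[string]interface{}{
			"claimRef": map[string]interface{}{"namespace": "source-ns", "name": "pvc-1"},
		},
	}}

	// Stand-in for restore.Spec.NamespaceMapping.
	namespaceMapping := map[string]string{"source-ns": "target-ns"}

	ns, found, _ := unstructured.NestedString(pv.Object, "spec", "claimRef", "namespace")
	if target, ok := namespaceMapping[ns]; found && ok {
		// Same field the new helper rewrites.
		_ = unstructured.SetNestedField(pv.Object, target, "spec", "claimRef", "namespace")
	}

	fmt.Println(pv.Object["spec"]) // claimRef.namespace is now "target-ns"
}
```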
@@ -1269,6 +1326,29 @@ func hasDeleteReclaimPolicy(obj map[string]interface{}) bool {
 	return policy == string(v1.PersistentVolumeReclaimDelete)
 }
 
+// resetVolumeBindingInfo clears any necessary metadata out of a PersistentVolume or PersistentVolumeClaim that would make it ineligible to be re-bound by Velero.
+func resetVolumeBindingInfo(obj *unstructured.Unstructured) *unstructured.Unstructured {
+	// Clean out ClaimRef UID and resourceVersion, since this information is highly unique.
+	unstructured.RemoveNestedField(obj.Object, "spec", "claimRef", "uid")
+	unstructured.RemoveNestedField(obj.Object, "spec", "claimRef", "resourceVersion")
+
+	// Clear out any annotations used by the Kubernetes PV controllers to track bindings.
+	annotations := obj.GetAnnotations()
+
+	// Upon restore, this new PV will look like a statically provisioned, manually-bound volume rather than one bound by the controller, so remove the annotation that signals that a controller bound it.
+	delete(annotations, KubeAnnBindCompleted)
+	// Remove the annotation that signals that the PV is already bound; we want the PV(C) controller to take the two objects and bind them again.
+	delete(annotations, KubeAnnBoundByController)
+
+	// Remove the provisioned-by annotation which signals that the persistent volume was dynamically provisioned; it is now statically provisioned.
+	delete(annotations, KubeAnnDynamicallyProvisioned)
+
+	// GetAnnotations returns a copy, so we have to set them again
+	obj.SetAnnotations(annotations)
+
+	return obj
+}
+
 func resetMetadataAndStatus(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
 	res, ok := obj.Object["metadata"]
 	if !ok {
@@ -1834,7 +1834,7 @@ func TestRestorePersistentVolumes(t *testing.T) {
 			},
 		},
 		{
-			name:    "when a PV with a reclaim policy of retain has no snapshot and does not exist in-cluster, it gets restored, without its claim ref",
+			name:    "when a PV with a reclaim policy of retain has no snapshot and does not exist in-cluster, it gets restored, with its claim ref",
 			restore: defaultRestore().Result(),
 			backup:  defaultBackup().Result(),
 			tarball: newTarWriter(t).

@@ -1853,6 +1853,7 @@ func TestRestorePersistentVolumes(t *testing.T) {
 					ObjectMeta(
 						builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),
 					).
+					ClaimRef("ns-1", "pvc-1").
 					Result(),
 				),
 			},
@@ -2100,13 +2101,12 @@ func TestRestorePersistentVolumes(t *testing.T) {
 			want: []*test.APIResource{
 				test.PVs(
 					builder.ForPersistentVolume("source-pv").AWSEBSVolumeID("source-volume").ClaimRef("source-ns", "pvc-1").Result(),
-					// note that the renamed PV is not expected to have a claimRef in this test; that would be
-					// added after creation by the Kubernetes PV/PVC controller when it does a bind.
 					builder.ForPersistentVolume("renamed-source-pv").
 						ObjectMeta(
 							builder.WithAnnotations("velero.io/original-pv-name", "source-pv"),
 							builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),
-						).
+						// the namespace for this PV's claimRef should be the one that the PVC was remapped into.
+						).ClaimRef("target-ns", "pvc-1").
 						AWSEBSVolumeID("new-volume").
 						Result(),
 				),
@@ -2165,6 +2165,7 @@ func TestRestorePersistentVolumes(t *testing.T) {
 					ObjectMeta(
 						builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),
 					).
+					ClaimRef("target-ns", "pvc-1").
 					AWSEBSVolumeID("new-volume").
 					Result(),
 				),
@@ -2178,6 +2179,67 @@ func TestRestorePersistentVolumes(t *testing.T) {
 				),
 			},
 		},
+		{
+			name:    "when a PV is renamed and the original PV does not exist in-cluster, the PV should be renamed",
+			restore: defaultRestore().NamespaceMappings("source-ns", "target-ns").Result(),
+			backup:  defaultBackup().Result(),
+			tarball: newTarWriter(t).
+				addItems(
+					"persistentvolumes",
+					builder.ForPersistentVolume("source-pv").AWSEBSVolumeID("source-volume").ClaimRef("source-ns", "pvc-1").Result(),
+				).
+				addItems(
+					"persistentvolumeclaims",
+					builder.ForPersistentVolumeClaim("source-ns", "pvc-1").VolumeName("source-pv").Result(),
+				).
+				done(),
+			apiResources: []*test.APIResource{
+				test.PVs(),
+				test.PVCs(),
+			},
+			volumeSnapshots: []*volume.Snapshot{
+				{
+					Spec: volume.SnapshotSpec{
+						BackupName:           "backup-1",
+						Location:             "default",
+						PersistentVolumeName: "source-pv",
+					},
+					Status: volume.SnapshotStatus{
+						Phase:              volume.SnapshotPhaseCompleted,
+						ProviderSnapshotID: "snapshot-1",
+					},
+				},
+			},
+			volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{
+				builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(),
+			},
+			volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{
+				"provider-1": &volumeSnapshotter{
+					snapshotVolumes: map[string]string{"snapshot-1": "new-pvname"},
+					pvName:          map[string]string{"new-pvname": "new-pvname"},
+				},
+			},
+			want: []*test.APIResource{
+				test.PVs(
+					builder.ForPersistentVolume("new-pvname").
+						ObjectMeta(
+							builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),
+							builder.WithAnnotations("velero.io/original-pv-name", "source-pv"),
+						).
+						ClaimRef("target-ns", "pvc-1").
+						AWSEBSVolumeID("new-pvname").
+						Result(),
+				),
+				test.PVCs(
+					builder.ForPersistentVolumeClaim("target-ns", "pvc-1").
+						ObjectMeta(
+							builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),
+						).
+						VolumeName("new-pvname").
+						Result(),
+				),
+			},
+		},
 		{
 			name:    "when a PV with a reclaim policy of retain has a snapshot and exists in-cluster, neither the snapshot nor the PV are restored",
 			restore: defaultRestore().Result(),
@@ -2284,13 +2346,12 @@ func TestRestorePersistentVolumes(t *testing.T) {
 			want: []*test.APIResource{
 				test.PVs(
 					builder.ForPersistentVolume("source-pv").AWSEBSVolumeID("source-volume").ClaimRef("source-ns", "pvc-1").Result(),
-					// note that the renamed PV is not expected to have a claimRef in this test; that would be
-					// added after creation by the Kubernetes PV/PVC controller when it does a bind.
 					builder.ForPersistentVolume("volumesnapshotter-renamed-source-pv").
 						ObjectMeta(
 							builder.WithAnnotations("velero.io/original-pv-name", "source-pv"),
 							builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"),
 						).
+						ClaimRef("target-ns", "pvc-1").
 						AWSEBSVolumeID("new-volume").
 						Result(),
 				),
@@ -2870,3 +2931,49 @@ func (h *harness) addItems(t *testing.T, resource *test.APIResource) {
 		require.NoError(t, err)
 	}
 }
+
+func Test_resetVolumeBindingInfo(t *testing.T) {
+	tests := []struct {
+		name     string
+		obj      *unstructured.Unstructured
+		expected *unstructured.Unstructured
+	}{
+		{
+			name: "PVs that are bound have their binding and dynamic provisioning annotations removed",
+			obj: NewTestUnstructured().WithMetadataField("kind", "persistentVolume").
+				WithName("pv-1").WithAnnotations(
+				KubeAnnBindCompleted,
+				KubeAnnBoundByController,
+				KubeAnnDynamicallyProvisioned,
+			).WithSpecField("claimRef", map[string]interface{}{
+				"namespace":       "ns-1",
+				"name":            "pvc-1",
+				"uid":             "abc",
+				"resourceVersion": "1"}).Unstructured,
+			expected: NewTestUnstructured().WithMetadataField("kind", "persistentVolume").
+				WithName("pv-1").
+				WithAnnotations().
+				WithSpecField("claimRef", map[string]interface{}{
+					"namespace": "ns-1", "name": "pvc-1"}).Unstructured,
+		},
+		{
+			name: "PVCs that are bound have their binding annotations removed, but the volume name stays",
+			obj: NewTestUnstructured().WithMetadataField("kind", "persistentVolumeClaim").
+				WithName("pvc-1").WithAnnotations(
+				KubeAnnBindCompleted,
+				KubeAnnBoundByController,
+				KubeAnnDynamicallyProvisioned,
+			).WithSpecField("volumeName", "pv-1").Unstructured,
+			expected: NewTestUnstructured().WithMetadataField("kind", "persistentVolumeClaim").
+				WithName("pvc-1").WithAnnotations().
+				WithSpecField("volumeName", "pv-1").Unstructured,
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			actual := resetVolumeBindingInfo(tc.obj)
+			assert.Equal(t, tc.expected, actual)
+		})
+	}
+}
@@ -33,8 +33,8 @@ If you are ready to jump in and test, add code, or help with documentation, foll
 
 See [the list of releases][6] to find out about feature changes.
 
-[1]: https://travis-ci.org/vmware-tanzu/velero.svg?branch=master
-[2]: https://travis-ci.org/vmware-tanzu/velero
+[1]: https://github.com/vmware-tanzu/velero/workflows/Master%20CI/badge.svg
+[2]: https://github.com/vmware-tanzu/velero/actions?query=workflow%3A"Master+CI"
 
 [4]: https://github.com/vmware-tanzu/velero/issues
 [6]: https://github.com/vmware-tanzu/velero/releases
@@ -33,8 +33,8 @@ If you are ready to jump in and test, add code, or help with documentation, foll
 
 See [the list of releases][6] to find out about feature changes.
 
-[1]: https://travis-ci.org/vmware-tanzu/velero.svg?branch=master
-[2]: https://travis-ci.org/vmware-tanzu/velero
+[1]: https://github.com/vmware-tanzu/velero/workflows/Master%20CI/badge.svg
+[2]: https://github.com/vmware-tanzu/velero/actions?query=workflow%3A"Master+CI"
 
 [4]: https://github.com/vmware-tanzu/velero/issues
 [6]: https://github.com/vmware-tanzu/velero/releases