mirror of
https://github.com/vmware-tanzu/velero.git
synced 2026-01-03 11:45:20 +00:00
k8s 1.18 import (#2651)
* k8s 1.18 import wip backup, cmd, controller, generated, restic, restore, serverstatusrequest, test and util Signed-off-by: Andrew Lavery <laverya@umich.edu>
* go mod tidy Signed-off-by: Andrew Lavery <laverya@umich.edu>
* add changelog file Signed-off-by: Andrew Lavery <laverya@umich.edu>
* go fmt Signed-off-by: Andrew Lavery <laverya@umich.edu>
* update code-generator and controller-gen in CI Signed-off-by: Andrew Lavery <laverya@umich.edu>
* checkout proper code-generator version, regen Signed-off-by: Andrew Lavery <laverya@umich.edu>
* fix remaining calls Signed-off-by: Andrew Lavery <laverya@umich.edu>
* regenerate CRDs with ./hack/update-generated-crd-code.sh Signed-off-by: Andrew Lavery <laverya@umich.edu>
* use existing context in restic and server Signed-off-by: Andrew Lavery <laverya@umich.edu>
* fix test cases by resetting resource version also use main library go context, not golang.org/x/net/context, in pkg/restore/restore.go Signed-off-by: Andrew Lavery <laverya@umich.edu>
* clarify changelog message Signed-off-by: Andrew Lavery <laverya@umich.edu>
* use github.com/kubernetes-csi/external-snapshotter/v2@v2.2.0-rc1 Signed-off-by: Andrew Lavery <laverya@umich.edu>
* run 'go mod tidy' to remove old external-snapshotter version Signed-off-by: Andrew Lavery <laverya@umich.edu>
This commit is contained in:
@@ -223,7 +223,7 @@ func (c *backupSyncController) run() {
 	backup.Labels[velerov1api.StorageLocationLabel] = label.GetValidName(backup.Spec.StorageLocation)
 
 	// attempt to create backup custom resource via API
-	backup, err = c.backupClient.Backups(backup.Namespace).Create(backup)
+	backup, err = c.backupClient.Backups(backup.Namespace).Create(context.TODO(), backup, metav1.CreateOptions{})
 	switch {
 	case err != nil && kuberrs.IsAlreadyExists(err):
 		log.Debug("Backup already exists in cluster")
@@ -260,7 +260,7 @@ func (c *backupSyncController) run() {
 	podVolumeBackup.Namespace = backup.Namespace
 	podVolumeBackup.ResourceVersion = ""
 
-	_, err = c.podVolumeBackupClient.PodVolumeBackups(backup.Namespace).Create(podVolumeBackup)
+	_, err = c.podVolumeBackupClient.PodVolumeBackups(backup.Namespace).Create(context.TODO(), podVolumeBackup, metav1.CreateOptions{})
 	switch {
 	case err != nil && kuberrs.IsAlreadyExists(err):
 		log.Debug("Pod volume backup already exists in cluster")
@@ -287,7 +287,7 @@ func (c *backupSyncController) run() {
 	for _, snapCont := range snapConts {
 		// TODO: Reset ResourceVersion prior to persisting VolumeSnapshotContents
 		snapCont.ResourceVersion = ""
-		created, err := c.csiSnapshotClient.SnapshotV1beta1().VolumeSnapshotContents().Create(snapCont)
+		created, err := c.csiSnapshotClient.SnapshotV1beta1().VolumeSnapshotContents().Create(context.TODO(), snapCont, metav1.CreateOptions{})
 		switch {
 		case err != nil && kuberrs.IsAlreadyExists(err):
 			log.Debugf("volumesnapshotcontent %s already exists in cluster", snapCont.Name)
@@ -336,7 +336,7 @@ func (c *backupSyncController) deleteOrphanedBackups(locationName string, backup
 			continue
 		}
 
-		if err := c.backupClient.Backups(backup.Namespace).Delete(backup.Name, nil); err != nil {
+		if err := c.backupClient.Backups(backup.Namespace).Delete(context.TODO(), backup.Name, metav1.DeleteOptions{}); err != nil {
 			log.WithError(errors.WithStack(err)).Error("Error deleting orphaned backup from cluster")
 		} else {
 			log.Debug("Deleted orphaned backup from cluster")
Reference in New Issue
Block a user