Merge branch 'release-1.9' into issue-4980

qiuming
2022-09-07 17:41:10 +08:00
committed by GitHub
8 changed files with 45 additions and 15 deletions

@@ -0,0 +1 @@
Fix nil pointer panic when restoring StatefulSets

@@ -0,0 +1 @@
Check for empty ns list before checking nslist[0]

@@ -0,0 +1 @@
Check for a nil VSC (VolumeSnapshotContent) pointer

@@ -0,0 +1 @@
Fix edge cases for resources that already exist

@@ -225,8 +225,11 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
 	namespacesToList := getNamespacesToList(r.backupRequest.NamespaceIncludesExcludes)
 
-	// Check if we're backing up namespaces, and only certain ones
-	if gr == kuberesource.Namespaces && namespacesToList[0] != "" {
+	// Check if we're backing up namespaces for a less-than-full backup.
+	// We enter this block if the resource is Namespaces and the namespace list is either empty or contains
+	// an explicit namespace list. (We skip this block if the list contains "", since that indicates
+	// a full-cluster backup.)
+	if gr == kuberesource.Namespaces && (len(namespacesToList) == 0 || namespacesToList[0] != "") {
 		resourceClient, err := r.dynamicFactory.ClientForGroupVersionResource(gv, resource, "")
 		if err != nil {
 			log.WithError(err).Error("Error getting dynamic client")
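
Note on the fix above: Go's && short-circuits left to right, so putting the len check first guarantees namespacesToList[0] is never evaluated on an empty slice. A minimal standalone sketch of that guard (the function name isScopedBackup is hypothetical, not Velero's):

    package main

    import "fmt"

    // isScopedBackup mirrors the guard above: an empty list or an explicit
    // namespace list means a scoped backup; a list containing "" means a
    // full-cluster backup.
    func isScopedBackup(namespacesToList []string) bool {
        // The len check runs first, so the index access never panics on an empty slice.
        return len(namespacesToList) == 0 || namespacesToList[0] != ""
    }

    func main() {
        fmt.Println(isScopedBackup([]string{}))         // true: empty list
        fmt.Println(isScopedBackup([]string{"app-ns"})) // true: explicit list
        fmt.Println(isScopedBackup([]string{""}))       // false: full-cluster backup
    }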

@@ -940,6 +940,10 @@ func (c *backupController) deleteVolumeSnapshot(volumeSnapshots []*snapshotv1api
 		if vs.Status.BoundVolumeSnapshotContentName != nil &&
 			len(*vs.Status.BoundVolumeSnapshotContentName) > 0 {
 			vsc = vscMap[*vs.Status.BoundVolumeSnapshotContentName]
+			if vsc == nil {
+				logger.Errorf("Cannot find %s in vscMap", *vs.Status.BoundVolumeSnapshotContentName)
+				return
+			}
 			if vsc.Spec.DeletionPolicy == snapshotv1api.VolumeSnapshotContentDelete {
 				modifyVSCFlag = true
 			}
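
Note on the nil check above: a Go map lookup of a pointer value returns nil for a missing key, so dereferencing the result without a guard panics. An equivalent, arguably more idiomatic form is the comma-ok lookup, sketched here with simplified stand-in types (not Velero's actual structs):

    package main

    import "fmt"

    type volumeSnapshotContent struct {
        deletionPolicy string
    }

    func main() {
        vscMap := map[string]*volumeSnapshotContent{
            "snapcontent-1": {deletionPolicy: "Delete"},
        }

        name := "snapcontent-missing"
        // The comma-ok form makes the "key absent" case explicit instead of
        // relying on the nil zero value.
        vsc, ok := vscMap[name]
        if !ok || vsc == nil {
            fmt.Printf("cannot find %s in vscMap\n", name)
            return
        }
        fmt.Println(vsc.deletionPolicy)
    }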

@@ -99,7 +99,7 @@ func (a *ChangeStorageClassAction) Execute(input *velero.RestoreItemActionExecut
 	if len(sts.Spec.VolumeClaimTemplates) > 0 {
 		for index, pvc := range sts.Spec.VolumeClaimTemplates {
-			exists, newStorageClass, err := a.isStorageClassExist(log, *pvc.Spec.StorageClassName, config)
+			exists, newStorageClass, err := a.isStorageClassExist(log, pvc.Spec.StorageClassName, config)
 			if err != nil {
 				return nil, err
 			} else if !exists {
@@ -124,7 +124,7 @@ func (a *ChangeStorageClassAction) Execute(input *velero.RestoreItemActionExecut
 		return nil, errors.Wrap(err, "error getting item's spec.storageClassName")
 	}
-	exists, newStorageClass, err := a.isStorageClassExist(log, storageClass, config)
+	exists, newStorageClass, err := a.isStorageClassExist(log, &storageClass, config)
 	if err != nil {
 		return nil, err
 	} else if !exists {
@@ -140,15 +140,15 @@ func (a *ChangeStorageClassAction) Execute(input *velero.RestoreItemActionExecut
 	return velero.NewRestoreItemActionExecuteOutput(obj), nil
 }
 
-func (a *ChangeStorageClassAction) isStorageClassExist(log *logrus.Entry, storageClass string, cm *corev1.ConfigMap) (exists bool, newStorageClass string, err error) {
-	if storageClass == "" {
+func (a *ChangeStorageClassAction) isStorageClassExist(log *logrus.Entry, storageClass *string, cm *corev1.ConfigMap) (exists bool, newStorageClass string, err error) {
+	if storageClass == nil || *storageClass == "" {
 		log.Debug("Item has no storage class specified")
 		return false, "", nil
 	}
 
-	newStorageClass, ok := cm.Data[storageClass]
+	newStorageClass, ok := cm.Data[*storageClass]
 	if !ok {
-		log.Debugf("No mapping found for storage class %s", storageClass)
+		log.Debugf("No mapping found for storage class %s", *storageClass)
 		return false, "", nil
 	}

@@ -1249,12 +1249,31 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 		errs.Add(namespace, err)
 		return warnings, errs
 	}
-	if isAlreadyExistsError {
-		fromCluster, err := resourceClient.Get(name, metav1.GetOptions{})
-		if err != nil {
-			ctx.log.Infof("Error retrieving cluster version of %s: %v", kube.NamespaceAndName(obj), err)
-			warnings.Add(namespace, err)
-			return warnings, errs
+
+	// Check whether we want to treat the error as a warning: in some cases the creation call might not
+	// get executed due to object API validations, and Velero might not get the "already exists" error
+	// type even though the object already exists.
+	objectExists := false
+	var fromCluster *unstructured.Unstructured
+
+	if restoreErr != nil && !isAlreadyExistsError {
+		// Check for the existence of the object in the cluster; if there is no error, the object exists.
+		// On error, we fall through and do another get call later.
+		fromCluster, err = resourceClient.Get(name, metav1.GetOptions{})
+		if err == nil {
+			objectExists = true
+		}
+	}
+
+	if isAlreadyExistsError || objectExists {
+		// Do a get call if we did not run this previously, i.e.
+		// we've only run this for errors other than isAlreadyExistsError.
+		if fromCluster == nil {
+			fromCluster, err = resourceClient.Get(name, metav1.GetOptions{})
+			if err != nil {
+				ctx.log.Errorf("Error retrieving cluster version of %s: %v", kube.NamespaceAndName(obj), err)
+				errs.Add(namespace, err)
+				return warnings, errs
+			}
 		}
 
 		// Remove insubstantial metadata.
 		fromCluster, err = resetMetadataAndStatus(fromCluster)
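
Note on the restructured block above: the new logic treats "create failed for some other reason, but a follow-up Get succeeds" the same as an AlreadyExists error, which covers objects rejected by API validation or admission before the create could report AlreadyExists. A condensed, hypothetical sketch of that decision flow (getFromCluster stands in for resourceClient.Get; the booleans mirror isAlreadyExistsError and objectExists):

    package main

    import (
        "errors"
        "fmt"
    )

    // getFromCluster stands in for resourceClient.Get.
    func getFromCluster(name string) (string, error) {
        return "existing-" + name, nil // pretend the object is already in the cluster
    }

    func handleCreateResult(name string, restoreErr error, isAlreadyExistsError bool) {
        objectExists := false
        var fromCluster string

        if restoreErr != nil && !isAlreadyExistsError {
            // The create failed with some other error; probe the cluster directly.
            if obj, err := getFromCluster(name); err == nil {
                fromCluster, objectExists = obj, true
            }
        }

        if isAlreadyExistsError || objectExists {
            // Mirror the "get if we haven't already" step above.
            if fromCluster == "" {
                fromCluster, _ = getFromCluster(name)
            }
            fmt.Printf("treating %s as already existing (%s)\n", name, fromCluster)
            return
        }
        fmt.Printf("create of %s failed: %v\n", name, restoreErr)
    }

    func main() {
        handleCreateResult("my-config", errors.New("denied by admission webhook"), false)
    }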
@@ -2024,7 +2043,7 @@ func (ctx *restoreContext) processUpdateResourcePolicy(fromCluster, fromClusterW
 	// try patching the in-cluster resource (resource diff plus latest backup/restore labels)
 	_, err = resourceClient.Patch(obj.GetName(), patchBytes)
 	if err != nil {
-		ctx.log.Errorf("patch attempt failed for %s %s: %v", fromCluster.GroupVersionKind(), kube.NamespaceAndName(fromCluster), err)
+		ctx.log.Warnf("patch attempt failed for %s %s: %v", fromCluster.GroupVersionKind(), kube.NamespaceAndName(fromCluster), err)
 		warnings.Add(namespace, err)
 
 		// try just patching the labels
 		warningsFromUpdate, errsFromUpdate := ctx.updateBackupRestoreLabels(fromCluster, fromClusterWithLabels, namespace, resourceClient)
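
Note on the log-level change above: a failed full patch is recoverable here because the code falls back to patching only the backup/restore labels, so it is recorded as a warning (Warnf) rather than an error. A minimal sketch, not Velero's actual implementation, of that try-then-fall-back flow (patchFull and patchLabels are hypothetical stand-ins for the two patch calls):

    package main

    import (
        "errors"
        "fmt"
    )

    func patchFull(name string) error   { return errors.New("field is immutable") }
    func patchLabels(name string) error { return nil }

    func updateWithFallback(name string) []string {
        var warnings []string
        if err := patchFull(name); err != nil {
            // Recoverable: record a warning and try the smaller label-only patch.
            warnings = append(warnings, fmt.Sprintf("patch attempt failed for %s: %v", name, err))
            if err := patchLabels(name); err != nil {
                warnings = append(warnings, fmt.Sprintf("label patch failed for %s: %v", name, err))
            }
        }
        return warnings
    }

    func main() {
        for _, w := range updateWithFallback("my-deployment") {
            fmt.Println("warning:", w)
        }
    }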