Mirror of https://github.com/vmware-tanzu/velero.git, synced 2026-04-19 23:30:42 +00:00
Merge pull request #8797 from blackpiglet/update_vs_vsc_name
Some checks failed
Run the E2E test on kind / build (push) Failing after 6m18s
Run the E2E test on kind / setup-test-matrix (push) Successful in 2s
Run the E2E test on kind / run-e2e-test (push) Has been skipped
Main CI / Build (push) Failing after 35s
Close stale issues and PRs / stale (push) Successful in 8s
Trivy Nightly Scan / Trivy nightly scan (velero, main) (push) Failing after 1m10s
Trivy Nightly Scan / Trivy nightly scan (velero-restore-helper, main) (push) Failing after 58s
Modify how the restore workflow uses the resource name
changelogs/unreleased/8797-blackpiglet (new file, 1 line)

@@ -0,0 +1 @@
+Refactor restoreItem and related functions to differentiate the backup resource name and the restore target resource name.
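The changelog line above is the whole idea of the commit: a restore item action or a PV rename can give an object a different name in the target cluster than it had in the backup, so the two names must be tracked separately. A minimal, self-contained sketch of that pattern (the rename helper is hypothetical, not Velero's API):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// renameDuringRestore stands in for any step (plugin, PV rename) that may
// give the object a new name in the target cluster.
func renameDuringRestore(obj *unstructured.Unstructured) {
	obj.SetName(obj.GetName() + "-renamed")
}

func main() {
	obj := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "PersistentVolume",
		"metadata":   map[string]interface{}{"name": "pv-from-backup"},
	}}

	// Capture the name as it appeared in the backup *before* anything can
	// mutate the object...
	backupResourceName := obj.GetName()

	renameDuringRestore(obj)

	// ...then key backup-scoped lookups by the backup name, and use
	// obj.GetName() for anything that touches the target cluster.
	fmt.Println("backup name: ", backupResourceName) // pv-from-backup
	fmt.Println("restore name:", obj.GetName())      // pv-from-backup-renamed
}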
@@ -1073,21 +1073,21 @@ func getResourceID(groupResource schema.GroupResource, namespace, name string) s
 	return fmt.Sprintf("%s/%s/%s", groupResource.String(), namespace, name)
 }
 
-func (ctx *restoreContext) getResource(groupResource schema.GroupResource, obj *unstructured.Unstructured, namespace, name string) (*unstructured.Unstructured, error) {
+func (ctx *restoreContext) getResource(groupResource schema.GroupResource, obj *unstructured.Unstructured, namespace string) (*unstructured.Unstructured, error) {
 	lister, err := ctx.getResourceLister(groupResource, obj, namespace)
 	if err != nil {
-		return nil, errors.Wrapf(err, "Error getting lister for %s", getResourceID(groupResource, namespace, name))
+		return nil, errors.Wrapf(err, "Error getting lister for %s", getResourceID(groupResource, namespace, obj.GetName()))
 	}
-	clusterObj, err := lister.Get(name)
+	clusterObj, err := lister.Get(obj.GetName())
 	if err != nil {
-		return nil, errors.Wrapf(err, "error getting resource from lister for %s, %s/%s", groupResource, namespace, name)
+		return nil, errors.Wrapf(err, "error getting resource from lister for %s, %s/%s", groupResource, namespace, obj.GetName())
 	}
 	u, ok := clusterObj.(*unstructured.Unstructured)
 	if !ok {
 		ctx.log.WithError(errors.WithStack(fmt.Errorf("expected *unstructured.Unstructured but got %T", u))).Error("unable to understand entry returned from client")
 		return nil, fmt.Errorf("expected *unstructured.Unstructured but got %T", u)
 	}
-	ctx.log.Debugf("get %s, %s/%s from informer cache", groupResource, namespace, name)
+	ctx.log.Debugf("get %s, %s/%s from informer cache", groupResource, namespace, obj.GetName())
 	return u, nil
 }
 
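Dropping the separate name parameter removes a way for the lookup name to drift out of sync with the object after a rename. A toy before/after sketch (illustrative types, not Velero's):

package main

import "fmt"

type object struct{ name string }

func (o object) GetName() string { return o.name }

// Before: the caller passed name separately, so obj and name could disagree
// once a rename step had run.
func getResourceOld(obj object, namespace, name string) string {
	return fmt.Sprintf("%s/%s", namespace, name)
}

// After: the name is always read from the object itself, so a lookup can
// never use a stale name.
func getResourceNew(obj object, namespace string) string {
	return fmt.Sprintf("%s/%s", namespace, obj.GetName())
}

func main() {
	obj := object{name: "pv-renamed"}
	fmt.Println(getResourceOld(obj, "ns", "pv-stale")) // ns/pv-stale (bug-prone)
	fmt.Println(getResourceNew(obj, "ns"))             // ns/pv-renamed
}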
@@ -1097,10 +1097,11 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 	itemExists := false
 	resourceID := getResourceID(groupResource, namespace, obj.GetName())
 	resourceKind := obj.GetKind()
+	backupResourceName := obj.GetName()
 
 	restoreLogger := ctx.log.WithFields(logrus.Fields{
 		"namespace":     obj.GetNamespace(),
-		"name":          obj.GetName(),
+		"original name": backupResourceName,
 		"groupResource": groupResource.String(),
 	})
 
@@ -1127,7 +1128,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 		// If the namespace scoped resource should be restored, ensure that the
 		// namespace into which the resource is being restored into exists.
 		// This is the *remapped* namespace that we are ensuring exists.
-		nsToEnsure := getNamespace(ctx.log, archive.GetItemFilePath(ctx.restoreDir, "namespaces", "", obj.GetNamespace()), namespace)
+		nsToEnsure := getNamespace(restoreLogger, archive.GetItemFilePath(ctx.restoreDir, "namespaces", "", obj.GetNamespace()), namespace)
 		_, nsCreated, err := kube.EnsureNamespaceExistsAndIsReady(nsToEnsure, ctx.namespaceClient, ctx.resourceTerminatingTimeout, ctx.resourceDeletionStatusTracker)
 		if err != nil {
 			errs.AddVeleroError(err)
@@ -1159,20 +1160,18 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 		return warnings, errs, itemExists
 	}
 	if complete {
-		ctx.log.Infof("%s is complete - skipping", kube.NamespaceAndName(obj))
+		restoreLogger.Infof("%s is complete - skipping", kube.NamespaceAndName(obj))
 		return warnings, errs, itemExists
 	}
 
-	name := obj.GetName()
-
 	// Check if we've already restored this itemKey.
 	itemKey := itemKey{
 		resource:  resourceKey(obj),
 		namespace: namespace,
-		name:      name,
+		name:      backupResourceName,
 	}
 	if prevRestoredItemStatus, exists := ctx.restoredItems[itemKey]; exists {
-		ctx.log.Infof("Skipping %s because it's already been restored.", resourceID)
+		restoreLogger.Infof("Skipping %s because it's already been restored.", resourceID)
 		itemExists = prevRestoredItemStatus.itemExists
 		return warnings, errs, itemExists
 	}
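Keying the already-restored check by backupResourceName keeps deduplication stable even when a plugin renames the item mid-restore. A small sketch of the map-keyed-by-struct idea (field values illustrative):

package main

import "fmt"

// itemKey mirrors the idea of Velero's restored-items tracking: a map keyed
// by the *backup* identity of an item, so renames during restore don't cause
// the same backup item to be processed twice.
type itemKey struct {
	resource  string
	namespace string
	name      string // backup name, not the (possibly renamed) restore name
}

func main() {
	restoredItems := map[itemKey]bool{}

	key := itemKey{resource: "persistentvolumes", namespace: "", name: "pv-from-backup"}

	if restoredItems[key] {
		fmt.Println("skipping: already restored")
		return
	}
	restoredItems[key] = true
	fmt.Println("restored", key.name)
}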
@@ -1197,7 +1196,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 	// TODO: move to restore item action if/when we add a ShouldRestore() method
 	// to the interface.
 	if groupResource == kuberesource.Pods && obj.GetAnnotations()[v1.MirrorPodAnnotationKey] != "" {
-		ctx.log.Infof("Not restoring pod because it's a mirror pod")
+		restoreLogger.Infof("Not restoring pod because it's a mirror pod")
 		return warnings, errs, itemExists
 	}
 
@@ -1209,7 +1208,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 	}
 
 		if volumeInfo, ok := ctx.backupVolumeInfoMap[obj.GetName()]; ok {
-			ctx.log.Infof("Find BackupVolumeInfo for PV %s.", obj.GetName())
+			restoreLogger.Infof("Find BackupVolumeInfo for PV %s.", obj.GetName())
 
 			switch volumeInfo.BackupMethod {
 			case volume.NativeSnapshot:
@@ -1219,11 +1218,9 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 					return warnings, errs, itemExists
 				}
 
-				name = obj.GetName()
-
 			case volume.PodVolumeBackup:
 				restoreLogger.Infof("Dynamically re-provisioning persistent volume because it has a pod volume backup to be restored.")
-				ctx.pvsToProvision.Insert(name)
+				ctx.pvsToProvision.Insert(backupResourceName)
 
 				// Return early because we don't want to restore the PV itself, we
 				// want to dynamically re-provision it.
@@ -1231,7 +1228,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 
 			case volume.CSISnapshot:
 				restoreLogger.Infof("Dynamically re-provisioning persistent volume because it has a CSI VolumeSnapshot or a related snapshot DataUpload.")
-				ctx.pvsToProvision.Insert(name)
+				ctx.pvsToProvision.Insert(backupResourceName)
 
 				// Return early because we don't want to restore the PV itself, we
 				// want to dynamically re-provision it.
@@ -1242,7 +1239,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 			default:
 				if hasDeleteReclaimPolicy(obj.Object) {
 					restoreLogger.Infof("Dynamically re-provisioning persistent volume because it doesn't have a snapshot and its reclaim policy is Delete.")
-					ctx.pvsToProvision.Insert(name)
+					ctx.pvsToProvision.Insert(backupResourceName)
 
 					// Return early because we don't want to restore the PV itself, we
 					// want to dynamically re-provision it.
@@ -1258,21 +1255,19 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 		} else {
 			// TODO: BackupVolumeInfo is adopted and old logic is deprecated in v1.13.
 			// Remove the old logic in v1.15.
-			ctx.log.Infof("Cannot find BackupVolumeInfo for PV %s.", obj.GetName())
+			restoreLogger.Infof("Cannot find BackupVolumeInfo for PV %s.", obj.GetName())
 
 			switch {
-			case hasSnapshot(name, ctx.volumeSnapshots):
+			case hasSnapshot(backupResourceName, ctx.volumeSnapshots):
 				obj, err = ctx.handlePVHasNativeSnapshot(obj, resourceClient)
 				if err != nil {
 					errs.Add(namespace, err)
 					return warnings, errs, itemExists
 				}
 
-				name = obj.GetName()
-
 			case hasPodVolumeBackup(obj, ctx):
 				restoreLogger.Infof("Dynamically re-provisioning persistent volume because it has a pod volume backup to be restored.")
-				ctx.pvsToProvision.Insert(name)
+				ctx.pvsToProvision.Insert(backupResourceName)
 
 				// Return early because we don't want to restore the PV itself, we
 				// want to dynamically re-provision it.
@@ -1282,7 +1277,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 				fallthrough
 			case hasSnapshotDataUpload(ctx, obj):
 				restoreLogger.Infof("Dynamically re-provisioning persistent volume because it has a CSI VolumeSnapshot or a related snapshot DataUpload.")
-				ctx.pvsToProvision.Insert(name)
+				ctx.pvsToProvision.Insert(backupResourceName)
 
 				// Return early because we don't want to restore the PV itself, we
 				// want to dynamically re-provision it.
@@ -1290,7 +1285,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 
 			case hasDeleteReclaimPolicy(obj.Object):
 				restoreLogger.Infof("Dynamically re-provisioning persistent volume because it doesn't have a snapshot and its reclaim policy is Delete.")
-				ctx.pvsToProvision.Insert(name)
+				ctx.pvsToProvision.Insert(backupResourceName)
 
 				// Return early because we don't want to restore the PV itself, we
 				// want to dynamically re-provision it.
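All of the pvsToProvision.Insert call sites above now use backupResourceName. That matters because the PVC later consulted via pvsToProvision.Has(pvc.Spec.VolumeName) still carries the backed-up volume name. A minimal sketch, with illustrative values:

package main

import "fmt"

// The re-provision set must be keyed by the backup name: the PVC from the
// backup still references the PV by its backed-up spec.volumeName, so the
// membership check only matches if the set was populated with that same
// (backup-side) name.
func main() {
	pvsToProvision := map[string]struct{}{}

	backupPVName := "pv-from-backup"
	pvsToProvision[backupPVName] = struct{}{} // Insert(backupResourceName)

	pvcVolumeName := "pv-from-backup" // pvc.Spec.VolumeName as captured in the backup
	if _, ok := pvsToProvision[pvcVolumeName]; ok {
		fmt.Println("reset PVC volumeName for dynamic provisioning")
	}
}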
@@ -1313,7 +1308,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 		return warnings, errs, itemExists
 	}
 
-	ctx.log.Infof("restore status includes excludes: %+v", ctx.resourceStatusIncludesExcludes)
+	restoreLogger.Infof("restore status includes excludes: %+v", ctx.resourceStatusIncludesExcludes)
 
 	for _, action := range ctx.getApplicableActions(groupResource, namespace) {
 		if !action.Selector.Matches(labels.Set(obj.GetLabels())) {
@@ -1322,12 +1317,12 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 
 		// If the EnableCSI feature is not enabled, but the executing action is from CSI plugin, skip the action.
 		if csiutil.ShouldSkipAction(action.Name()) {
-			ctx.log.Infof("Skip action %s for resource %s:%s/%s, because the CSI feature is not enabled. Feature setting is %s.",
+			restoreLogger.Infof("Skip action %s for resource %s:%s/%s, because the CSI feature is not enabled. Feature setting is %s.",
 				action.Name(), groupResource.String(), obj.GetNamespace(), obj.GetName(), features.Serialize())
 			continue
 		}
 
-		ctx.log.Infof("Executing item action for %v", &groupResource)
+		restoreLogger.Infof("Executing item action for %v", &groupResource)
 		executeOutput, err := action.RestoreItemAction.Execute(&velero.RestoreItemActionExecuteInput{
 			Item:           obj,
 			ItemFromBackup: itemFromBackup,
@@ -1343,7 +1338,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 			resourceIdentifier := velero.ResourceIdentifier{
 				GroupResource: groupResource,
 				Namespace:     namespace,
-				Name:          name,
+				Name:          backupResourceName,
 			}
 			now := metav1.Now()
 			newOperation := itemoperation.RestoreOperation{
@@ -1363,7 +1358,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 			*itemOperList = append(*itemOperList, &newOperation)
 		}
 		if executeOutput.SkipRestore {
-			ctx.log.Infof("Skipping restore of %s: %v because a registered plugin discarded it", obj.GroupVersionKind().Kind, name)
+			restoreLogger.Infof("Skipping restore because a registered plugin discarded it")
 			return warnings, errs, itemExists
 		}
 		unstructuredObj, ok := executeOutput.UpdatedItem.(*unstructured.Unstructured)
@@ -1379,7 +1374,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 			itemPath := archive.GetItemFilePath(ctx.restoreDir, additionalItem.GroupResource.String(), additionalItem.Namespace, additionalItem.Name)
 
 			if _, err := ctx.fileSystem.Stat(itemPath); err != nil {
-				ctx.log.WithError(err).WithFields(logrus.Fields{
+				restoreLogger.WithError(err).WithFields(logrus.Fields{
 					"additionalResource":          additionalItem.GroupResource.String(),
 					"additionalResourceNamespace": additionalItem.Namespace,
 					"additionalResourceName":      additionalItem.Name,
@@ -1441,13 +1436,13 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 			// This is the case for PVB volumes, where we need to actually have an empty volume created instead of restoring one.
 			// The assumption is that any PV in pvsToProvision doesn't have an associated snapshot.
 			if ctx.pvsToProvision.Has(pvc.Spec.VolumeName) {
-				ctx.log.Infof("Resetting PersistentVolumeClaim %s/%s for dynamic provisioning", namespace, name)
+				restoreLogger.Infof("Resetting PersistentVolumeClaim for dynamic provisioning")
 				unstructured.RemoveNestedField(obj.Object, "spec", "volumeName")
 			}
 		}
 
 		if newName, ok := ctx.renamedPVs[pvc.Spec.VolumeName]; ok {
-			ctx.log.Infof("Updating persistent volume claim %s/%s to reference renamed persistent volume (%s -> %s)", namespace, name, pvc.Spec.VolumeName, newName)
+			restoreLogger.Infof("Updating persistent volume claim %s/%s to reference renamed persistent volume (%s -> %s)", namespace, obj.GetName(), pvc.Spec.VolumeName, newName)
 			if err := unstructured.SetNestedField(obj.Object, newName, "spec", "volumeName"); err != nil {
 				errs.Add(namespace, err)
 				return warnings, errs, itemExists
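The renamedPVs lookup above is where the backup-name/restore-name split becomes visible to PVCs: the claim still references the PV by its backed-up name and must be re-pointed at the renamed volume. A hedged sketch using only apimachinery's unstructured helpers (object contents illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// renamedPVs maps the backed-up PV name to the name it received in the
	// target cluster; values here are illustrative.
	renamedPVs := map[string]string{"pv-from-backup": "velero-clone-1234"}

	pvc := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "PersistentVolumeClaim",
		"metadata":   map[string]interface{}{"name": "data", "namespace": "app"},
		"spec":       map[string]interface{}{"volumeName": "pv-from-backup"},
	}}

	volumeName, _, _ := unstructured.NestedString(pvc.Object, "spec", "volumeName")
	if newName, ok := renamedPVs[volumeName]; ok {
		// Re-point the claim at the renamed volume before creating it.
		if err := unstructured.SetNestedField(pvc.Object, newName, "spec", "volumeName"); err != nil {
			panic(err)
		}
	}
	fmt.Println(pvc.Object["spec"]) // map[volumeName:velero-clone-1234]
}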
@@ -1456,7 +1451,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 	}
 
 	if ctx.resourceModifiers != nil {
-		if errList := ctx.resourceModifiers.ApplyResourceModifierRules(obj, groupResource.String(), ctx.kbClient.Scheme(), ctx.log); errList != nil {
+		if errList := ctx.resourceModifiers.ApplyResourceModifierRules(obj, groupResource.String(), ctx.kbClient.Scheme(), restoreLogger); errList != nil {
 			for _, err := range errList {
 				errs.Add(namespace, err)
 			}
@@ -1482,7 +1477,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 	// like from Openshift DeploymentConfig to native Deployment.
 	// we should re-get the newGR.Resource again in such a case
 	if obj.GetKind() != resourceKind {
-		ctx.log.Infof("Resource kind changed from %s to %s", resourceKind, obj.GetKind())
+		restoreLogger.Infof("Resource kind changed from %s to %s", resourceKind, obj.GetKind())
 		gvr, _, err := ctx.discoveryHelper.KindFor(obj.GroupVersionKind())
 		if err != nil {
 			errs.Add(namespace, fmt.Errorf("error getting GVR for %s: %v", obj.GroupVersionKind(), err))
@@ -1491,7 +1486,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 		newGR.Resource = gvr.Resource
 	}
 	if !reflect.DeepEqual(newGR, groupResource) {
-		ctx.log.Infof("Resource to be restored changed from %v to %v", groupResource, newGR)
+		restoreLogger.Infof("Resource to be restored changed from %v to %v", groupResource, newGR)
 	}
 	resourceClient, err := ctx.getResourceClient(newGR, obj, obj.GetNamespace())
 	if err != nil {
@@ -1499,7 +1494,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 		return warnings, errs, itemExists
 	}
 
-	ctx.log.Infof("Attempting to restore %s: %v", obj.GroupVersionKind().Kind, name)
+	restoreLogger.Infof("Attempting to restore %s: %s.", obj.GroupVersionKind().Kind, obj.GetName())
 
 	// check if we want to treat the error as a warning, in some cases the creation call might not get executed due to object API validations
 	// and Velero might not get the already exists error type but in reality the object already exists
@@ -1509,12 +1504,12 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 	// only attempt Get before Create if using informer cache, otherwise this will slow down restore into
 	// new namespace
 	if !ctx.disableInformerCache {
-		ctx.log.Debugf("Checking for existence %s: %v", obj.GroupVersionKind().Kind, name)
-		fromCluster, err = ctx.getResource(newGR, obj, namespace, name)
+		restoreLogger.Debugf("Checking for existence %s", obj.GetName())
+		fromCluster, err = ctx.getResource(newGR, obj, namespace)
 	}
 	if err != nil || fromCluster == nil {
 		// couldn't find the resource, attempt to create
-		ctx.log.Debugf("Creating %s: %v", obj.GroupVersionKind().Kind, name)
+		restoreLogger.Debugf("Creating %s", obj.GetName())
 		createdObj, restoreErr = resourceClient.Create(obj)
 		if restoreErr == nil {
 			itemExists = true
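The hunk above implements get-before-create: check the informer cache first, create on a miss, and handle an AlreadyExists failure by re-fetching. A simplified stand-in (a map in place of the cache/client, names illustrative):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// cluster is a stand-in for the informer cache / API client pair; the real
// code uses a lister when the cache is enabled and a dynamic client otherwise.
type cluster struct{ objects map[string]string }

func (c *cluster) get(name string) (string, error) {
	if v, ok := c.objects[name]; ok {
		return v, nil
	}
	return "", errNotFound
}

func (c *cluster) create(name, v string) error {
	if _, ok := c.objects[name]; ok {
		return errors.New("already exists")
	}
	c.objects[name] = v
	return nil
}

func main() {
	c := &cluster{objects: map[string]string{}}
	name := "example"

	// Get-before-create: cheap existence check first, then create on a miss;
	// an AlreadyExists error (e.g. lost race) falls back to a fresh get.
	if _, err := c.get(name); err != nil {
		if err := c.create(name, "restored"); err != nil {
			obj, _ := c.get(name) // re-fetch the in-cluster version
			fmt.Println("exists, comparing with:", obj)
			return
		}
		fmt.Println("created", name)
	}
}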
@@ -1533,12 +1528,12 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 		// and if err then itemExists remains false as we were not able to confirm the existence of the object via Get call or creation call.
 		// We return the get error as a warning to notify the user that the object could exist in cluster and we were not able to confirm it.
 		if !ctx.disableInformerCache {
-			fromCluster, err = ctx.getResource(newGR, obj, namespace, name)
+			fromCluster, err = ctx.getResource(newGR, obj, namespace)
 		} else {
-			fromCluster, err = resourceClient.Get(name, metav1.GetOptions{})
+			fromCluster, err = resourceClient.Get(obj.GetName(), metav1.GetOptions{})
 		}
 		if err != nil && isAlreadyExistsError {
-			ctx.log.Warnf("Unable to retrieve in-cluster version of %s: %v, object won't be restored by velero or have restore labels, and existing resource policy is not applied", kube.NamespaceAndName(obj), err)
+			restoreLogger.Warnf("Unable to retrieve in-cluster version of %s: %s, object won't be restored by velero or have restore labels, and existing resource policy is not applied", kube.NamespaceAndName(obj), err.Error())
 			warnings.Add(namespace, err)
 			return warnings, errs, itemExists
 		}
@@ -1552,7 +1547,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 		// Remove insubstantial metadata.
 		fromCluster, err = resetMetadataAndStatus(fromCluster)
 		if err != nil {
-			ctx.log.Infof("Error trying to reset metadata for %s: %v", kube.NamespaceAndName(obj), err)
+			restoreLogger.Infof("Error trying to reset metadata for %s: %s", kube.NamespaceAndName(obj), err.Error())
 			warnings.Add(namespace, err)
 			return warnings, errs, itemExists
 		}
@@ -1568,14 +1563,14 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 		case kuberesource.ServiceAccounts:
 			desired, err := mergeServiceAccounts(fromCluster, obj)
 			if err != nil {
-				ctx.log.Infof("error merging secrets for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
+				restoreLogger.Infof("error merging secrets for ServiceAccount %s: %s", kube.NamespaceAndName(obj), err.Error())
 				warnings.Add(namespace, err)
 				return warnings, errs, itemExists
 			}
 
 			patchBytes, err := generatePatch(fromCluster, desired)
 			if err != nil {
-				ctx.log.Infof("error generating patch for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
+				restoreLogger.Infof("error generating patch for ServiceAccount %s: %s", kube.NamespaceAndName(obj), err.Error())
 				warnings.Add(namespace, err)
 				return warnings, errs, itemExists
 			}
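For ServiceAccounts the flow is merge, diff, patch: compute the desired object, diff it against the in-cluster version, and send only the delta. Velero's generatePatch is internal; a rough equivalent of the diff step can be sketched with a JSON merge patch library (github.com/evanphx/json-patch is an assumption here, not necessarily what Velero uses):

package main

import (
	"encoding/json"
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	// Illustrative in-cluster and desired states of a ServiceAccount's secrets.
	fromCluster := []byte(`{"secrets":["a"]}`)
	desired := []byte(`{"secrets":["a","b"]}`)

	// The patch carries only what changed (arrays are replaced wholesale
	// under JSON merge patch semantics).
	patch, err := jsonpatch.CreateMergePatch(fromCluster, desired)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"secrets":["a","b"]}

	// Applying the patch to the in-cluster document yields the desired state.
	merged, err := jsonpatch.MergePatch(fromCluster, patch)
	if err != nil {
		panic(err)
	}
	var out interface{}
	_ = json.Unmarshal(merged, &out)
	fmt.Println(out)
}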
@@ -1586,7 +1581,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 				return warnings, errs, itemExists
 			}
 
-			_, err = resourceClient.Patch(name, patchBytes)
+			_, err = resourceClient.Patch(obj.GetName(), patchBytes)
 			if err != nil {
 				warnings.Add(namespace, err)
 				// check if there is existingResourcePolicy and if it is set to update policy
@@ -1601,13 +1596,13 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 			} else {
 				itemStatus.action = ItemRestoreResultUpdated
 				ctx.restoredItems[itemKey] = itemStatus
-				ctx.log.Infof("ServiceAccount %s successfully updated", kube.NamespaceAndName(obj))
+				restoreLogger.Infof("ServiceAccount %s successfully updated", kube.NamespaceAndName(obj))
 			}
 		default:
 			// check for the presence of existingResourcePolicy
 			if len(ctx.restore.Spec.ExistingResourcePolicy) > 0 {
 				resourcePolicy := ctx.restore.Spec.ExistingResourcePolicy
-				ctx.log.Infof("restore API has resource policy defined %s , executing restore workflow accordingly for changed resource %s %s", resourcePolicy, fromCluster.GroupVersionKind().Kind, kube.NamespaceAndName(fromCluster))
+				restoreLogger.Infof("restore API has resource policy defined %s, executing restore workflow accordingly for changed resource %s %s", resourcePolicy, fromCluster.GroupVersionKind().Kind, kube.NamespaceAndName(fromCluster))
 
 				// existingResourcePolicy is set as none, add warning
 				if resourcePolicy == velerov1api.PolicyTypeNone {
@@ -1627,7 +1622,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 				}
 			} else {
 				// Preserved Velero behavior when existingResourcePolicy is not specified by the user
-				e := errors.Errorf("could not restore, %s %q already exists. Warning: the in-cluster version is different than the backed-up version",
+				e := errors.Errorf("could not restore, %s:%s already exists. Warning: the in-cluster version is different than the backed-up version",
 					obj.GetKind(), obj.GetName())
 				warnings.Add(namespace, e)
 			}
@@ -1638,7 +1633,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 		//update backup/restore labels on the unchanged resources if existingResourcePolicy is set as update
 		if ctx.restore.Spec.ExistingResourcePolicy == velerov1api.PolicyTypeUpdate {
 			resourcePolicy := ctx.restore.Spec.ExistingResourcePolicy
-			ctx.log.Infof("restore API has resource policy defined %s , executing restore workflow accordingly for unchanged resource %s %s ", resourcePolicy, obj.GroupVersionKind().Kind, kube.NamespaceAndName(fromCluster))
+			restoreLogger.Infof("restore API has resource policy defined %s, executing restore workflow accordingly for unchanged resource %s %s ", resourcePolicy, obj.GroupVersionKind().Kind, kube.NamespaceAndName(fromCluster))
 			// remove restore labels so that we apply the latest backup/restore names on the object via patch
 			removeRestoreLabels(fromCluster)
 			// try updating the backup/restore labels for the in-cluster object
@@ -1647,23 +1642,23 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 			errs.Merge(&errsFromUpdate)
 		}
 
-		ctx.log.Infof("Restore of %s, %v skipped: it already exists in the cluster and is the same as the backed up version", obj.GroupVersionKind().Kind, name)
+		restoreLogger.Infof("Restore of %s skipped: it already exists in the cluster and is the same as the backed up version", obj.GetName())
 		return warnings, errs, itemExists
 	}
 
 	// Error was something other than an AlreadyExists.
 	if restoreErr != nil {
-		ctx.log.Errorf("error restoring %s: %+v", name, restoreErr)
+		restoreLogger.Errorf("error restoring %s: %s", obj.GetName(), restoreErr.Error())
 		errs.Add(namespace, fmt.Errorf("error restoring %s: %v", resourceID, restoreErr))
 		return warnings, errs, itemExists
 	}
 
 	// determine whether to restore status according to original GR
-	shouldRestoreStatus := determineRestoreStatus(obj, ctx.resourceStatusIncludesExcludes, groupResource.String(), ctx.log)
+	shouldRestoreStatus := determineRestoreStatus(obj, ctx.resourceStatusIncludesExcludes, groupResource.String(), restoreLogger)
 
 	if shouldRestoreStatus && statusFieldErr != nil {
 		err := fmt.Errorf("could not get status to be restored %s: %v", kube.NamespaceAndName(obj), statusFieldErr)
-		ctx.log.Errorf(err.Error())
+		restoreLogger.Errorf(err.Error())
 		errs.Add(namespace, err)
 		return warnings, errs, itemExists
 	}
@@ -1671,7 +1666,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 	// Proceed with status restoration if decided
 	if statusFieldExists && shouldRestoreStatus {
 		if err := unstructured.SetNestedField(obj.Object, objStatus, "status"); err != nil {
-			ctx.log.Errorf("could not set status field %s: %v", kube.NamespaceAndName(obj), err)
+			restoreLogger.Errorf("could not set status field %s: %s", kube.NamespaceAndName(obj), err.Error())
 			errs.Add(namespace, err)
 			return warnings, errs, itemExists
 		}
@@ -1682,7 +1677,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 			updated, err := resourceClient.UpdateStatus(obj, metav1.UpdateOptions{})
 			if err != nil {
 				if apierrors.IsConflict(err) {
-					res, err := resourceClient.Get(name, metav1.GetOptions{})
+					res, err := resourceClient.Get(obj.GetName(), metav1.GetOptions{})
 					if err == nil {
 						resourceVersion = res.GetResourceVersion()
 					}
@@ -1693,7 +1688,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 			createdObj = updated
 			return nil
 		}); err != nil {
-			ctx.log.Infof("status field update failed %s: %v", kube.NamespaceAndName(obj), err)
+			restoreLogger.Infof("status field update failed %s: %s", kube.NamespaceAndName(obj), err.Error())
 			warnings.Add(namespace, err)
 		}
 	}
@@ -1703,19 +1698,19 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 	createdObj.SetManagedFields(obj.GetManagedFields())
 	patchBytes, err := generatePatch(withoutManagedFields, createdObj)
 	if err != nil {
-		ctx.log.Errorf("error generating patch for managed fields %s: %v", kube.NamespaceAndName(obj), err)
+		restoreLogger.Errorf("error generating patch for managed fields %s: %s", kube.NamespaceAndName(obj), err.Error())
 		errs.Add(namespace, err)
 		return warnings, errs, itemExists
 	}
 	if patchBytes != nil {
-		if _, err = resourceClient.Patch(name, patchBytes); err != nil {
-			ctx.log.Errorf("error patch for managed fields %s: %v", kube.NamespaceAndName(obj), err)
+		if _, err = resourceClient.Patch(obj.GetName(), patchBytes); err != nil {
+			restoreLogger.Errorf("error patch for managed fields %s: %s", kube.NamespaceAndName(obj), err.Error())
 			if !apierrors.IsNotFound(err) {
 				errs.Add(namespace, err)
 				return warnings, errs, itemExists
 			}
 		} else {
-			ctx.log.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj))
+			restoreLogger.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj))
 		}
 	}
 
@@ -1739,14 +1734,14 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 	if newGR == kuberesource.Pods {
 		pod := new(v1.Pod)
 		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil {
-			ctx.log.Errorf("error converting pod %s: %v", kube.NamespaceAndName(obj), err)
+			restoreLogger.Errorf("error converting pod %s: %s", kube.NamespaceAndName(obj), err.Error())
 			errs.Add(namespace, err)
 			return warnings, errs, itemExists
 		}
 
 		execHooksByContainer, err := ctx.hooksWaitExecutor.groupHooks(ctx.restore.Name, pod, ctx.multiHookTracker)
 		if err != nil {
-			ctx.log.Errorf("error grouping hooks from pod %s: %v", kube.NamespaceAndName(obj), err)
+			restoreLogger.Errorf("error grouping hooks from pod %s: %s", kube.NamespaceAndName(obj), err.Error())
 			errs.Add(namespace, err)
 			return warnings, errs, itemExists
 		}
@@ -1757,11 +1752,11 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 	// Wait for a CRD to be available for instantiating resources
 	// before continuing.
 	if newGR == kuberesource.CustomResourceDefinitions {
-		available, err := ctx.crdAvailable(name, resourceClient)
+		available, err := ctx.crdAvailable(obj.GetName(), resourceClient)
 		if err != nil {
-			errs.Add(namespace, errors.Wrapf(err, "error verifying the CRD %s is ready to use", name))
+			errs.Add(namespace, errors.Wrapf(err, "error verifying the CRD %s is ready to use", obj.GetName()))
 		} else if !available {
-			errs.Add(namespace, fmt.Errorf("the CRD %s is not available to use for custom resources", name))
+			errs.Add(namespace, fmt.Errorf("the CRD %s is not available to use for custom resources", obj.GetName()))
 		}
 	}
 