diff --git a/changelogs/unreleased/7052-qiuming-best b/changelogs/unreleased/7052-qiuming-best
new file mode 100644
index 000000000..e1829fd74
--- /dev/null
+++ b/changelogs/unreleased/7052-qiuming-best
@@ -0,0 +1,2 @@
+Make data mover fail early

diff --git a/changelogs/unreleased/7138-blackpiglet b/changelogs/unreleased/7138-blackpiglet
new file mode 100644
index 000000000..ccd5d0690
--- /dev/null
+++ b/changelogs/unreleased/7138-blackpiglet
@@ -0,0 +1 @@
+Use VolumeInfo to help restore the PV.
\ No newline at end of file
diff --git a/changelogs/unreleased/7150-Lyndon-Li b/changelogs/unreleased/7150-Lyndon-Li
new file mode 100644
index 000000000..920544827
--- /dev/null
+++ b/changelogs/unreleased/7150-Lyndon-Li
@@ -0,0 +1 @@
+Fix issue #7135, check pod status before checking node-agent pod status
\ No newline at end of file
diff --git a/changelogs/unreleased/7152-reasonerjt b/changelogs/unreleased/7152-reasonerjt
new file mode 100644
index 000000000..809921f33
--- /dev/null
+++ b/changelogs/unreleased/7152-reasonerjt
@@ -0,0 +1 @@
+Track the skipped PV when SnapshotVolumes is set to false
\ No newline at end of file
diff --git a/changelogs/unreleased/7153-allenxu404 b/changelogs/unreleased/7153-allenxu404
new file mode 100644
index 000000000..a8faaf99c
--- /dev/null
+++ b/changelogs/unreleased/7153-allenxu404
@@ -0,0 +1 @@
+Enhance hooks tracker by adding a returned error to the Record function
\ No newline at end of file
diff --git a/design/Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md b/design/Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md
index f1cc7f61e..48daff02f 100644
--- a/design/Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md
+++ b/design/Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md
@@ -433,23 +433,24 @@ spec: volume: nginx-log ```
 We will add the flag for both CLI installation and Helm Chart Installation. Specifically:
-- Helm Chart Installation: add the "--pod-volume-backup-uploader" flag into its value.yaml and then generate the deployments according to the value. Value.yaml is the user-provided configuration file, therefore, users could set this value at the time of installation. The changes in Value.yaml are as below:
+- Helm Chart Installation: add the "uploaderType" and "defaultVolumesToFsBackup" values into its values.yaml and then generate the deployments according to those values. values.yaml is the user-provided configuration file, therefore users can set these values at the time of installation. The changes in values.yaml are as below:
```
command:
- /velero
args:
- server
{{- with .Values.configuration }}
- {{- if .pod-volume-backup-uploader "restic" }}
- - --legacy
- {{- end }}
+ - --uploader-type={{ default "restic" .uploaderType }}
+ {{- if .defaultVolumesToFsBackup }}
+ - --default-volumes-to-fs-backup
+ {{- end }}
```
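+For reference, the corresponding user-provided section of values.yaml could look like the sketch below (illustrative only; the exact key names depend on the chart version):
+```
+configuration:
+  uploaderType: kopia
+  defaultVolumesToFsBackup: true
+```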
-- CLI Installation: add the "--pod-volume-backup-uploader" flag into the installation command line, and then create the two deployments accordingly. Users could change the option at the time of installation. The CLI is as below:
-```velero install --pod-volume-backup-uploader=restic```
-```velero install --pod-volume-backup-uploader=kopia```
+- CLI Installation: add the "--uploader-type" and "--default-volumes-to-fs-backup" flags into the installation command line, and then create the two deployments accordingly. Users can change the options at the time of installation. The CLI is as below:
+```velero install --uploader-type=restic --default-volumes-to-fs-backup --use-node-agent```
+```velero install --uploader-type=kopia --default-volumes-to-fs-backup --use-node-agent```
## Upgrade
-For upgrade, we allow users to change the path by specifying "--pod-volume-backup-uploader" flag in the same way as the fresh installation. Therefore, the flag change should be applied to the Velero server after upgrade. Additionally, We need to add a label to Velero server to indicate the current path, so as to provide an easy for querying it.
+For upgrade, we allow users to change the path by specifying the "--uploader-type" flag in the same way as in a fresh installation. Therefore, the flag change should be applied to the Velero server after upgrade. Additionally, we need to add a label to the Velero server to indicate the current path, so as to provide an easy way to query it.
Moreover, if users upgrade from the old release, we need to change the existing Restic Daemonset name to VeleroNodeAgent daemonSet. The name change should be applied after upgrade. The recommended way for upgrade is to modify the related Velero resource directly through kubectl, the above changes will be applied in the same way. We need to modify the Velero doc for all these changes.
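+For illustration, the in-place upgrade could be performed as below (a sketch, assuming the default "velero" namespace):
+```
+kubectl -n velero edit deployment/velero    # adjust the --uploader-type server argument
+kubectl -n velero get daemonset             # verify the node-agent daemonSet after upgrade
+```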
@@ -459,7 +460,7 @@ Below Velero CLI or its output needs some changes:
- ```Velero restore describe```: the output should indicate the path
- ```Velero restic repo get```: the name of this CLI should be changed to a generic one, for example, "Velero repo get"; the output of this CLI should print all the backup repository if Restic repository and Unified Repository exist at the same time
-At present, we don't have a requirement for selecting the path during backup, so we don't change the ```Velero backup create``` CLI for now. If there is a requirement in future, we could simply add a flag similar to "--pod-volume-backup-uploader" to select the path.
+At present, we don't have a requirement for selecting the path during backup, so we don't change the ```Velero backup create``` CLI for now. If there is a requirement in the future, we could simply add a flag similar to "--uploader-type" to select the path.
## CR Example
Below sample files demonstrate complete CRs with all the changes mentioned above:
diff --git a/design/pv_backup_info.md b/design/pv_backup_info.md
index 107305fe5..90cd998cb 100644
--- a/design/pv_backup_info.md
+++ b/design/pv_backup_info.md
@@ -125,7 +125,7 @@ type BackupStore interface {
### How the VolumeInfo array is used.
#### Generate the PVC backed-up information summary
-The downstream tools can use this VolumeInfo array to format and display their volume information. This is in the scope of this feature.
+The downstream tools can use this VolumeInfo array to format and display their volume information. This is not in the scope of this feature.
#### Retrieve volume backed-up information for `velero backup describe` command
The `velero backup describe` can also use this VolumeInfo array structure to display the volume information. The snapshot data mover volume should use this structure at first, then the Velero native snapshot, CSI snapshot, and PodVolumeBackup can also use this structure. The detailed implementation is also not in this feature's scope.
diff --git a/internal/hook/hook_tracker.go b/internal/hook/hook_tracker.go
index f4e2bb817..a0300d8f6 100644
--- a/internal/hook/hook_tracker.go
+++ b/internal/hook/hook_tracker.go
@@ -16,7 +16,10 @@ limitations under the License.
package hook
-import "sync"
+import (
+	"fmt"
+	"sync"
+)
const (
HookSourceAnnotation = "annotation"
@@ -69,6 +72,8 @@ func NewHookTracker() *HookTracker {
}
// Add adds a hook to the tracker
+// Add must precede Record for each individual hook.
+// In other words, a hook must be added to the tracker before its execution result is recorded.
func (ht *HookTracker) Add(podNamespace, podName, container, source, hookName string, hookPhase hookPhase) {
ht.lock.Lock()
defer ht.lock.Unlock()
@@ -91,7 +96,9 @@ func (ht *HookTracker) Add(podNamespace, podName, container, source, hookName st
}
// Record records the hook's execution status
-func (ht *HookTracker) Record(podNamespace, podName, container, source, hookName string, hookPhase hookPhase, hookFailed bool) {
+// Add must precede Record for each individual hook.
+// In other words, a hook must be added to the tracker before its execution result is recorded.
+func (ht *HookTracker) Record(podNamespace, podName, container, source, hookName string, hookPhase hookPhase, hookFailed bool) error {
ht.lock.Lock()
defer ht.lock.Unlock()
@@ -104,12 +111,16 @@ func (ht *HookTracker) Record(podNamespace, podName, container, source, hookName
hookName: hookName,
}
+	var err error
if _, ok := ht.tracker[key]; ok {
ht.tracker[key] = hookTrackerVal{
hookFailed: hookFailed,
hookExecuted: true,
}
+	} else {
+		err = fmt.Errorf("hook does not exist in hooks tracker, hook key: %v", key)
}
+	return err
}
// Stat calculates the number of attempted hooks and failed hooks
diff --git a/internal/hook/hook_tracker_test.go b/internal/hook/hook_tracker_test.go
index d104cc91d..9e6ca95d3 100644
--- a/internal/hook/hook_tracker_test.go
+++ b/internal/hook/hook_tracker_test.go
@@ -50,7 +50,7 @@ func TestHookTracker_Add(t *testing.T) {
func TestHookTracker_Record(t *testing.T) {
tracker := NewHookTracker()
tracker.Add("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre)
- tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, true)
+ err := tracker.Record("ns1", "pod1", "container1", HookSourceAnnotation, "h1", PhasePre, true)
key := hookTrackerKey{
podNamespace: "ns1",
@@ -63,6 +63,11 @@ func TestHookTracker_Record(t *testing.T) {
info := tracker.tracker[key]
assert.True(t, info.hookFailed)
+	assert.Nil(t, err)
+
+	err = tracker.Record("ns2", "pod2", "container1", HookSourceAnnotation, "h1", PhasePre, true)
+	assert.NotNil(t, err)
+
}
func TestHookTracker_Stat(t *testing.T) {
diff --git a/internal/hook/item_hook_handler.go b/internal/hook/item_hook_handler.go
index 9075bc50f..8120386d6 100644
--- a/internal/hook/item_hook_handler.go
+++ b/internal/hook/item_hook_handler.go
@@ -233,14 +233,19 @@ func (h *DefaultItemHookHandler) HandleHooks(
},
)
- hookTracker.Record(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase, false)
- if err := h.PodCommandExecutor.ExecutePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, "", hookFromAnnotations); err != nil {
- hookLog.WithError(err).Error("Error executing hook")
- hookTracker.Record(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "", phase, true)
+ hookFailed := false
+ var errExec error
+ if errExec = h.PodCommandExecutor.ExecutePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, "", hookFromAnnotations); errExec != nil {
+ hookLog.WithError(errExec).Error("Error executing hook")
+ hookFailed = true
+ }
+ errTracker := hookTracker.Record(namespace, name, hookFromAnnotations.Container, HookSourceAnnotation, "",
phase, hookFailed) + if errTracker != nil { + hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker") + } - if hookFromAnnotations.OnError == velerov1api.HookErrorModeFail { - return err - } + if errExec != nil && hookFromAnnotations.OnError == velerov1api.HookErrorModeFail { + return errExec } return nil @@ -277,15 +282,19 @@ func (h *DefaultItemHookHandler) HandleHooks( }, ) - hookTracker.Record(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase, false) + hookFailed := false err := h.PodCommandExecutor.ExecutePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, resourceHook.Name, hook.Exec) if err != nil { hookLog.WithError(err).Error("Error executing hook") - hookTracker.Record(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase, true) + hookFailed = true if hook.Exec.OnError == velerov1api.HookErrorModeFail { modeFailError = err } } + errTracker := hookTracker.Record(namespace, name, hook.Exec.Container, HookSourceSpec, resourceHook.Name, phase, hookFailed) + if errTracker != nil { + hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker") + } } } } diff --git a/internal/hook/wait_exec_hook_handler.go b/internal/hook/wait_exec_hook_handler.go index 452b8c421..1ca2eea8a 100644 --- a/internal/hook/wait_exec_hook_handler.go +++ b/internal/hook/wait_exec_hook_handler.go @@ -166,7 +166,11 @@ func (e *DefaultWaitExecHookHandler) HandleHooks( err := fmt.Errorf("hook %s in container %s expired before executing", hook.HookName, hook.Hook.Container) hookLog.Error(err) errors = append(errors, err) - hookTracker.Record(newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), true) + + errTracker := hookTracker.Record(newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), true) + if errTracker != nil { + hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker") + } if hook.Hook.OnError == velerov1api.HookErrorModeFail { cancel() @@ -179,17 +183,24 @@ func (e *DefaultWaitExecHookHandler) HandleHooks( OnError: hook.Hook.OnError, Timeout: hook.Hook.ExecTimeout, } - hookTracker.Record(newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), false) - if err := e.PodCommandExecutor.ExecutePodCommand(hookLog, podMap, pod.Namespace, pod.Name, hook.HookName, eh); err != nil { - hookLog.WithError(err).Error("Error executing hook") - err = fmt.Errorf("hook %s in container %s failed to execute, err: %v", hook.HookName, hook.Hook.Container, err) - errors = append(errors, err) - hookTracker.Record(newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), true) - if hook.Hook.OnError == velerov1api.HookErrorModeFail { - cancel() - return - } + hookFailed := false + var hookErr error + if hookErr = e.PodCommandExecutor.ExecutePodCommand(hookLog, podMap, pod.Namespace, pod.Name, hook.HookName, eh); hookErr != nil { + hookLog.WithError(hookErr).Error("Error executing hook") + hookErr = fmt.Errorf("hook %s in container %s failed to execute, err: %v", hook.HookName, hook.Hook.Container, hookErr) + errors = append(errors, hookErr) + hookFailed = true + } + + errTracker := hookTracker.Record(newPod.Namespace, newPod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), hookFailed) + if errTracker != nil { + hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker") + } + + if hookErr 
!= nil && hook.Hook.OnError == velerov1api.HookErrorModeFail { + cancel() + return } } delete(byContainer, containerName) @@ -233,7 +244,12 @@ func (e *DefaultWaitExecHookHandler) HandleHooks( "hookPhase": "post", }, ) - hookTracker.Record(pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), true) + + errTracker := hookTracker.Record(pod.Namespace, pod.Name, hook.Hook.Container, hook.HookSource, hook.HookName, hookPhase(""), true) + if errTracker != nil { + hookLog.WithError(errTracker).Warn("Error recording the hook in hook tracker") + } + hookLog.Error(err) errors = append(errors, err) } diff --git a/internal/hook/wait_exec_hook_handler_test.go b/internal/hook/wait_exec_hook_handler_test.go index fe632d113..3f2354274 100644 --- a/internal/hook/wait_exec_hook_handler_test.go +++ b/internal/hook/wait_exec_hook_handler_test.go @@ -1216,6 +1216,37 @@ func TestRestoreHookTrackerUpdate(t *testing.T) { hookTracker: hookTracker3, expectedFailed: 2, }, + { + name: "a hook was recorded before added to tracker", + groupResource: "pods", + initialPod: builder.ForPod("default", "my-pod"). + Containers(&v1.Container{ + Name: "container1", + }). + ContainerStatuses(&v1.ContainerStatus{ + Name: "container1", + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{}, + }, + }). + Result(), + byContainer: map[string][]PodExecRestoreHook{ + "container1": { + { + HookName: "my-hook-1", + HookSource: HookSourceSpec, + Hook: velerov1api.ExecRestoreHook{ + Container: "container1", + Command: []string{"/usr/bin/foo"}, + OnError: velerov1api.HookErrorModeContinue, + WaitTimeout: metav1.Duration{Duration: time.Millisecond}, + }, + }, + }, + }, + hookTracker: NewHookTracker(), + expectedFailed: 0, + }, } for _, test := range tests1 { diff --git a/pkg/volume/snapshotlocation.go b/internal/volume/snapshotlocation.go similarity index 100% rename from pkg/volume/snapshotlocation.go rename to internal/volume/snapshotlocation.go diff --git a/internal/volume/volumes_information.go b/internal/volume/volumes_information.go new file mode 100644 index 000000000..64fbcb07b --- /dev/null +++ b/internal/volume/volumes_information.go @@ -0,0 +1,556 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package volume
+
+import (
+	"context"
+	"strconv"
+
+	snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
+	"github.com/sirupsen/logrus"
+	corev1api "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
+
+	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
+	velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1"
+	"github.com/vmware-tanzu/velero/pkg/itemoperation"
+	"github.com/vmware-tanzu/velero/pkg/kuberesource"
+	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
+	"github.com/vmware-tanzu/velero/pkg/volume"
+)
+
+type VolumeBackupMethod string
+
+const (
+	NativeSnapshot VolumeBackupMethod = "NativeSnapshot"
+	PodVolumeBackup VolumeBackupMethod = "PodVolumeBackup"
+	CSISnapshot VolumeBackupMethod = "CSISnapshot"
+)
+
+type VolumeInfo struct {
+	// The PVC's name.
+	PVCName string `json:"pvcName,omitempty"`
+
+	// The PVC's namespace.
+	PVCNamespace string `json:"pvcNamespace,omitempty"`
+
+	// The PV name.
+	PVName string `json:"pvName,omitempty"`
+
+	// The way the volume data is backed up. The valid values are `NativeSnapshot`, `PodVolumeBackup` and `CSISnapshot`.
+	BackupMethod VolumeBackupMethod `json:"backupMethod,omitempty"`
+
+	// Whether the volume's snapshot data is moved to the specified storage.
+	SnapshotDataMoved bool `json:"snapshotDataMoved"`
+
+	// Whether the local snapshot is preserved after the snapshot is moved.
+	// The local snapshot may be a result of a CSI snapshot backup (no data movement)
+	// or a CSI snapshot data movement plus preserving the local snapshot.
+	PreserveLocalSnapshot bool `json:"preserveLocalSnapshot"`
+
+	// Whether the volume is skipped in this backup.
+	Skipped bool `json:"skipped"`
+
+	// The reason why the volume is skipped in the backup.
+	SkippedReason string `json:"skippedReason,omitempty"`
+
+	// The snapshot start timestamp.
+	StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"`
+
+	// The Async Operation's ID.
+	OperationID string `json:"operationID,omitempty"`
+
+	CSISnapshotInfo CSISnapshotInfo `json:"csiSnapshotInfo,omitempty"`
+	SnapshotDataMovementInfo SnapshotDataMovementInfo `json:"snapshotDataMovementInfo,omitempty"`
+	NativeSnapshotInfo NativeSnapshotInfo `json:"nativeSnapshotInfo,omitempty"`
+	PVBInfo PodVolumeBackupInfo `json:"pvbInfo,omitempty"`
+	PVInfo PVInfo `json:"pvInfo,omitempty"`
+}
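+
+// An illustrative serialized VolumeInfo entry (field values are made up for demonstration):
+//
+//	{
+//	  "pvcName": "data-pvc",
+//	  "pvcNamespace": "app",
+//	  "pvName": "pvc-0001",
+//	  "backupMethod": "CSISnapshot",
+//	  "snapshotDataMoved": false,
+//	  "skipped": false,
+//	  "csiSnapshotInfo": {"snapshotHandle": "snap-0001", "size": 1073741824, "driver": "example.csi.driver", "vscName": "vsc-0001"}
+//	}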
+
+// CSISnapshotInfo is used for displaying the CSI snapshot status
+type CSISnapshotInfo struct {
+	// It's the storage provider's snapshot ID for CSI.
+	SnapshotHandle string `json:"snapshotHandle"`
+
+	// The volume size corresponding to the snapshot.
+	Size int64 `json:"size"`
+
+	// The name of the CSI driver.
+	Driver string `json:"driver"`
+
+	// The name of the VolumeSnapshotContent.
+	VSCName string `json:"vscName"`
+}
+
+// SnapshotDataMovementInfo is used for displaying the snapshot data mover status.
+type SnapshotDataMovementInfo struct {
+	// The data mover used by the backup. The valid values are `velero` and `` (equivalent to `velero`).
+	DataMover string `json:"dataMover"`
+
+	// The type of the uploader that uploads the snapshot data. The valid values are `kopia` and `restic`.
+	UploaderType string `json:"uploaderType"`
+
+	// The name or ID of the snapshot associated object (SAO).
+	// SAO is used to support local snapshots for the snapshot data mover,
+	// e.g. it could be a VolumeSnapshot for CSI snapshot data movement.
+	RetainedSnapshot string `json:"retainedSnapshot"`
+
+	// It's the filesystem repository's snapshot ID.
+	SnapshotHandle string `json:"snapshotHandle"`
+}
+
+// NativeSnapshotInfo is used for displaying the Velero native snapshot status.
+// A Velero Native Snapshot is a cloud storage snapshot taken by the Velero native
+// plugins, e.g. velero-plugin-for-aws, velero-plugin-for-gcp, and
+// velero-plugin-for-microsoft-azure.
+type NativeSnapshotInfo struct {
+	// It's the storage provider's snapshot ID for the Velero-native snapshot.
+	SnapshotHandle string `json:"snapshotHandle"`
+
+	// The cloud provider snapshot volume type.
+	VolumeType string `json:"volumeType"`
+
+	// The cloud provider snapshot volume's availability zone.
+	VolumeAZ string `json:"volumeAZ"`
+
+	// The cloud provider snapshot volume's IOPS.
+	IOPS string `json:"iops"`
+}
+
+// PodVolumeBackupInfo is used for displaying the PodVolumeBackup snapshot status.
+type PodVolumeBackupInfo struct {
+	// It's the file-system uploader's snapshot ID for PodVolumeBackup.
+	SnapshotHandle string `json:"snapshotHandle"`
+
+	// The volume size corresponding to the snapshot.
+	Size int64 `json:"size"`
+
+	// The type of the uploader that uploads the data. The valid values are `kopia` and `restic`.
+	UploaderType string `json:"uploaderType"`
+
+	// The PVC's corresponding volume name used by the Pod.
+	// https://github.com/kubernetes/kubernetes/blob/e4b74dd12fa8cb63c174091d5536a10b8ec19d34/pkg/apis/core/types.go#L48
+	VolumeName string `json:"volumeName"`
+
+	// The name of the Pod mounting this PVC.
+	PodName string `json:"podName"`
+
+	// The Pod's namespace.
+	PodNamespace string `json:"podNamespace"`
+
+	// The name of the k8s node where the PVB was taken.
+	NodeName string `json:"nodeName"`
+}
+
+// PVInfo is used to store some PV information modified after creation.
+// That information is lost after PV recreation.
+type PVInfo struct {
+	// ReclaimPolicy of the PV. It could be different from the one in the referenced StorageClass.
+	ReclaimPolicy string `json:"reclaimPolicy"`
+
+	// The PV's labels that should be kept after recreation.
+	Labels map[string]string `json:"labels"`
+}
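+
+// A usage sketch of VolumesInformation (an assumed flow, for illustration;
+// variable names are placeholders):
+//
+//	volumesInfo := VolumesInformation{}
+//	volumesInfo.Init()
+//	volumesInfo.InsertPVMap(pv, "my-pvc", "my-namespace") // once per backed-up PV
+//	infos := volumesInfo.Result(vsList, vscList, vsClassList, crClient, logger)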
+
+// VolumesInformation contains the information needed to generate
+// the backup VolumeInfo array.
+type VolumesInformation struct {
+	// A map containing the detail content of the backup-included PVs. The key is the PV name.
+	pvMap map[string]pvcPvInfo
+	volumeInfos []*VolumeInfo
+
+	logger logrus.FieldLogger
+	crClient kbclient.Client
+	volumeSnapshots []snapshotv1api.VolumeSnapshot
+	volumeSnapshotContents []snapshotv1api.VolumeSnapshotContent
+	volumeSnapshotClasses []snapshotv1api.VolumeSnapshotClass
+	SkippedPVs map[string]string
+	NativeSnapshots []*volume.Snapshot
+	PodVolumeBackups []*velerov1api.PodVolumeBackup
+	BackupOperations []*itemoperation.BackupOperation
+	BackupName string
+}
+
+type pvcPvInfo struct {
+	PVCName string
+	PVCNamespace string
+	PV corev1api.PersistentVolume
+}
+
+func (v *VolumesInformation) Init() {
+	v.pvMap = make(map[string]pvcPvInfo)
+	v.volumeInfos = make([]*VolumeInfo, 0)
+}
+
+func (v *VolumesInformation) InsertPVMap(pv corev1api.PersistentVolume, pvcName, pvcNamespace string) {
+	if v.pvMap == nil {
+		v.Init()
+	}
+
+	v.pvMap[pv.Name] = pvcPvInfo{
+		PVCName: pvcName,
+		PVCNamespace: pvcNamespace,
+		PV: pv,
+	}
+}
+
+func (v *VolumesInformation) Result(
+	csiVolumeSnapshots []snapshotv1api.VolumeSnapshot,
+	csiVolumeSnapshotContents []snapshotv1api.VolumeSnapshotContent,
+	csiVolumeSnapshotClasses []snapshotv1api.VolumeSnapshotClass,
+	crClient kbclient.Client,
+	logger logrus.FieldLogger,
+) []*VolumeInfo {
+	v.logger = logger
+	v.crClient = crClient
+	v.volumeSnapshots = csiVolumeSnapshots
+	v.volumeSnapshotContents = csiVolumeSnapshotContents
+	v.volumeSnapshotClasses = csiVolumeSnapshotClasses
+
+	v.generateVolumeInfoForSkippedPV()
+	v.generateVolumeInfoForVeleroNativeSnapshot()
+	v.generateVolumeInfoForCSIVolumeSnapshot()
+	v.generateVolumeInfoFromPVB()
+	v.generateVolumeInfoFromDataUpload()
+
+	return v.volumeInfos
+}
+
+// generateVolumeInfoForSkippedPV generates VolumeInfos for the skipped PVs.
+func (v *VolumesInformation) generateVolumeInfoForSkippedPV() {
+	tmpVolumeInfos := make([]*VolumeInfo, 0)
+
+	for pvName, skippedReason := range v.SkippedPVs {
+		if pvcPVInfo := v.retrievePvcPvInfo(pvName, "", ""); pvcPVInfo != nil {
+			volumeInfo := &VolumeInfo{
+				PVCName: pvcPVInfo.PVCName,
+				PVCNamespace: pvcPVInfo.PVCNamespace,
+				PVName: pvName,
+				SnapshotDataMoved: false,
+				Skipped: true,
+				SkippedReason: skippedReason,
+				PVInfo: PVInfo{
+					ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
+					Labels: pvcPVInfo.PV.Labels,
+				},
+			}
+			tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo)
+		} else {
+			v.logger.Warnf("Cannot find info for PV %s", pvName)
+			continue
+		}
+	}
+
+	v.volumeInfos = append(v.volumeInfos, tmpVolumeInfos...)
+}
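+
+// Note: the generateVolumeInfo* functions are deliberately best-effort: when a
+// volume's PV/PVC details are missing from pvMap, they log a warning and skip
+// that volume rather than failing the whole VolumeInfo generation.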
+
+// generateVolumeInfoForVeleroNativeSnapshot generates VolumeInfos for Velero native snapshots.
+func (v *VolumesInformation) generateVolumeInfoForVeleroNativeSnapshot() {
+	tmpVolumeInfos := make([]*VolumeInfo, 0)
+
+	for _, nativeSnapshot := range v.NativeSnapshots {
+		var iops int64
+		if nativeSnapshot.Spec.VolumeIOPS != nil {
+			iops = *nativeSnapshot.Spec.VolumeIOPS
+		}
+
+		if pvcPVInfo := v.retrievePvcPvInfo(nativeSnapshot.Spec.PersistentVolumeName, "", ""); pvcPVInfo != nil {
+			volumeInfo := &VolumeInfo{
+				BackupMethod: NativeSnapshot,
+				PVCName: pvcPVInfo.PVCName,
+				PVCNamespace: pvcPVInfo.PVCNamespace,
+				PVName: pvcPVInfo.PV.Name,
+				SnapshotDataMoved: false,
+				Skipped: false,
+				NativeSnapshotInfo: NativeSnapshotInfo{
+					SnapshotHandle: nativeSnapshot.Status.ProviderSnapshotID,
+					VolumeType: nativeSnapshot.Spec.VolumeType,
+					VolumeAZ: nativeSnapshot.Spec.VolumeAZ,
+					IOPS: strconv.FormatInt(iops, 10),
+				},
+				PVInfo: PVInfo{
+					ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
+					Labels: pvcPVInfo.PV.Labels,
+				},
+			}
+
+			tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo)
+		} else {
+			v.logger.Warnf("cannot find info for PV %s", nativeSnapshot.Spec.PersistentVolumeName)
+			continue
+		}
+	}
+
+	v.volumeInfos = append(v.volumeInfos, tmpVolumeInfos...)
+}
+
+// generateVolumeInfoForCSIVolumeSnapshot generates VolumeInfos for CSI VolumeSnapshots.
+func (v *VolumesInformation) generateVolumeInfoForCSIVolumeSnapshot() {
+	tmpVolumeInfos := make([]*VolumeInfo, 0)
+
+	for _, volumeSnapshot := range v.volumeSnapshots {
+		var volumeSnapshotClass *snapshotv1api.VolumeSnapshotClass
+		var volumeSnapshotContent *snapshotv1api.VolumeSnapshotContent
+
+		// This is protective logic. The passed-in VSs should all be related
+		// to this backup.
+		if volumeSnapshot.Labels[velerov1api.BackupNameLabel] != v.BackupName {
+			continue
+		}
+
+		if volumeSnapshot.Spec.VolumeSnapshotClassName == nil {
+			v.logger.Warnf("Cannot find VolumeSnapshotClass for VolumeSnapshot %s/%s", volumeSnapshot.Namespace, volumeSnapshot.Name)
+			continue
+		}
+
+		if volumeSnapshot.Status == nil || volumeSnapshot.Status.BoundVolumeSnapshotContentName == nil {
+			v.logger.Warnf("Cannot find VolumeSnapshotContent for VolumeSnapshot %s/%s", volumeSnapshot.Namespace, volumeSnapshot.Name)
+			continue
+		}
+
+		if volumeSnapshot.Spec.Source.PersistentVolumeClaimName == nil {
+			v.logger.Warnf("VolumeSnapshot %s/%s doesn't have a source PVC", volumeSnapshot.Namespace, volumeSnapshot.Name)
+			continue
+		}
+
+		for index := range v.volumeSnapshotClasses {
+			if *volumeSnapshot.Spec.VolumeSnapshotClassName == v.volumeSnapshotClasses[index].Name {
+				volumeSnapshotClass = &v.volumeSnapshotClasses[index]
+			}
+		}
+
+		for index := range v.volumeSnapshotContents {
+			if *volumeSnapshot.Status.BoundVolumeSnapshotContentName == v.volumeSnapshotContents[index].Name {
+				volumeSnapshotContent = &v.volumeSnapshotContents[index]
+			}
+		}
+
+		if volumeSnapshotClass == nil || volumeSnapshotContent == nil {
+			v.logger.Warnf("fail to get VolumeSnapshotContent or VolumeSnapshotClass for VolumeSnapshot: %s/%s",
+				volumeSnapshot.Namespace, volumeSnapshot.Name)
+			continue
+		}
+
+		var operation itemoperation.BackupOperation
+		for _, op := range v.BackupOperations {
+			if op.Spec.ResourceIdentifier.GroupResource.String() == kuberesource.VolumeSnapshots.String() &&
+				op.Spec.ResourceIdentifier.Name == volumeSnapshot.Name &&
+				op.Spec.ResourceIdentifier.Namespace == volumeSnapshot.Namespace {
+				operation = *op
+			}
+		}
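+
+		// RestoreSize and the VSC's SnapshotHandle are optional status fields;
+		// default to 0 and "" when the CSI driver has not populated them.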
+		var size int64
+		if volumeSnapshot.Status.RestoreSize != nil {
+			size = volumeSnapshot.Status.RestoreSize.Value()
+		}
+		snapshotHandle := ""
+		if volumeSnapshotContent.Status.SnapshotHandle != nil {
+			snapshotHandle = *volumeSnapshotContent.Status.SnapshotHandle
+		}
+		if pvcPVInfo := v.retrievePvcPvInfo("", *volumeSnapshot.Spec.Source.PersistentVolumeClaimName, volumeSnapshot.Namespace); pvcPVInfo != nil {
+			volumeInfo := &VolumeInfo{
+				BackupMethod: CSISnapshot,
+				PVCName: pvcPVInfo.PVCName,
+				PVCNamespace: pvcPVInfo.PVCNamespace,
+				PVName: pvcPVInfo.PV.Name,
+				Skipped: false,
+				SnapshotDataMoved: false,
+				PreserveLocalSnapshot: true,
+				OperationID: operation.Spec.OperationID,
+				StartTimestamp: &(volumeSnapshot.CreationTimestamp),
+				CSISnapshotInfo: CSISnapshotInfo{
+					VSCName: *volumeSnapshot.Status.BoundVolumeSnapshotContentName,
+					Size: size,
+					Driver: volumeSnapshotClass.Driver,
+					SnapshotHandle: snapshotHandle,
+				},
+				PVInfo: PVInfo{
+					ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
+					Labels: pvcPVInfo.PV.Labels,
+				},
+			}
+
+			tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo)
+		} else {
+			v.logger.Warnf("cannot find info for PVC %s/%s", volumeSnapshot.Namespace, volumeSnapshot.Spec.Source.PersistentVolumeClaimName)
+			continue
+		}
+	}
+
+	v.volumeInfos = append(v.volumeInfos, tmpVolumeInfos...)
+}
+
+// generateVolumeInfoFromPVB generates VolumeInfos for PVBs.
+func (v *VolumesInformation) generateVolumeInfoFromPVB() {
+	tmpVolumeInfos := make([]*VolumeInfo, 0)
+
+	for _, pvb := range v.PodVolumeBackups {
+		volumeInfo := &VolumeInfo{
+			BackupMethod: PodVolumeBackup,
+			SnapshotDataMoved: false,
+			Skipped: false,
+			StartTimestamp: pvb.Status.StartTimestamp,
+			PVBInfo: PodVolumeBackupInfo{
+				SnapshotHandle: pvb.Status.SnapshotID,
+				Size: pvb.Status.Progress.TotalBytes,
+				UploaderType: pvb.Spec.UploaderType,
+				VolumeName: pvb.Spec.Volume,
+				PodName: pvb.Spec.Pod.Name,
+				PodNamespace: pvb.Spec.Pod.Namespace,
+				NodeName: pvb.Spec.Node,
+			},
+		}
+
+		pod := new(corev1api.Pod)
+		pvcName := ""
+		err := v.crClient.Get(context.TODO(), kbclient.ObjectKey{Namespace: pvb.Spec.Pod.Namespace, Name: pvb.Spec.Pod.Name}, pod)
+		if err != nil {
+			v.logger.WithError(err).Warn("Failed to get pod for PodVolumeBackup: ", pvb.Name)
+			continue
+		}
+		for _, volume := range pod.Spec.Volumes {
+			if volume.Name == pvb.Spec.Volume && volume.PersistentVolumeClaim != nil {
+				pvcName = volume.PersistentVolumeClaim.ClaimName
+			}
+		}
+
+		if pvcName != "" {
+			if pvcPVInfo := v.retrievePvcPvInfo("", pvcName, pod.Namespace); pvcPVInfo != nil {
+				volumeInfo.PVCName = pvcPVInfo.PVCName
+				volumeInfo.PVCNamespace = pvcPVInfo.PVCNamespace
+				volumeInfo.PVName = pvcPVInfo.PV.Name
+				volumeInfo.PVInfo = PVInfo{
+					ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
+					Labels: pvcPVInfo.PV.Labels,
+				}
+			} else {
+				v.logger.Warnf("Cannot find info for PVC %s/%s", pod.Namespace, pvcName)
+				continue
+			}
+		} else {
+			v.logger.Debugf("The PVB %s doesn't have a corresponding PVC", pvb.Name)
+		}
+
+		tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo)
+	}
+
+	v.volumeInfos = append(v.volumeInfos, tmpVolumeInfos...)
+}
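+
+// Resolution chain used below (for orientation): a BackupOperation on a PVC
+// references a DataUpload CR in its PostOperationItems; the DataUpload's
+// CSISnapshot.SnapshotClass in turn yields the VolumeSnapshotClass and driver.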
+// generateVolumeInfoFromDataUpload generates VolumeInfos for DataUploads.
+func (v *VolumesInformation) generateVolumeInfoFromDataUpload() {
+	tmpVolumeInfos := make([]*VolumeInfo, 0)
+	vsClassList := new(snapshotv1api.VolumeSnapshotClassList)
+	if err := v.crClient.List(context.TODO(), vsClassList); err != nil {
+		v.logger.WithError(err).Error("cannot list VolumeSnapshotClass")
+		return
+	}
+
+	for _, operation := range v.BackupOperations {
+		if operation.Spec.ResourceIdentifier.GroupResource.String() == kuberesource.PersistentVolumeClaims.String() {
+			var duIdentifier velero.ResourceIdentifier
+
+			for _, identifier := range operation.Spec.PostOperationItems {
+				if identifier.GroupResource.String() == "datauploads.velero.io" {
+					duIdentifier = identifier
+				}
+			}
+			if duIdentifier.Empty() {
+				v.logger.Warnf("cannot find DataUpload for PVC %s/%s backup async operation",
+					operation.Spec.ResourceIdentifier.Namespace, operation.Spec.ResourceIdentifier.Name)
+				continue
+			}
+
+			dataUpload := new(velerov2alpha1.DataUpload)
+			err := v.crClient.Get(
+				context.TODO(),
+				kbclient.ObjectKey{
+					Namespace: duIdentifier.Namespace,
+					Name: duIdentifier.Name},
+				dataUpload,
+			)
+			if err != nil {
+				v.logger.Warnf("fail to get DataUpload for operation %s: %s", operation.Spec.OperationID, err.Error())
+				continue
+			}
+
+			driverUsedByVSClass := ""
+			for index := range vsClassList.Items {
+				if vsClassList.Items[index].Name == dataUpload.Spec.CSISnapshot.SnapshotClass {
+					driverUsedByVSClass = vsClassList.Items[index].Driver
+				}
+			}
+
+			if pvcPVInfo := v.retrievePvcPvInfo("", operation.Spec.ResourceIdentifier.Name, operation.Spec.ResourceIdentifier.Namespace); pvcPVInfo != nil {
+				volumeInfo := &VolumeInfo{
+					BackupMethod: CSISnapshot,
+					PVCName: pvcPVInfo.PVCName,
+					PVCNamespace: pvcPVInfo.PVCNamespace,
+					PVName: pvcPVInfo.PV.Name,
+					SnapshotDataMoved: true,
+					Skipped: false,
+					OperationID: operation.Spec.OperationID,
+					StartTimestamp: operation.Status.Created,
+					CSISnapshotInfo: CSISnapshotInfo{
+						Driver: driverUsedByVSClass,
+					},
+					SnapshotDataMovementInfo: SnapshotDataMovementInfo{
+						DataMover: dataUpload.Spec.DataMover,
+						UploaderType: "kopia",
+					},
+					PVInfo: PVInfo{
+						ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy),
+						Labels: pvcPVInfo.PV.Labels,
+					},
+				}
+
+				tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo)
+			} else {
+				v.logger.Warnf("Cannot find info for PVC %s/%s", operation.Spec.ResourceIdentifier.Namespace, operation.Spec.ResourceIdentifier.Name)
+				continue
+			}
+		}
+	}
+
+	v.volumeInfos = append(v.volumeInfos, tmpVolumeInfos...)
+}
+
+// retrievePvcPvInfo gets the pvcPvInfo from the pvMap.
+// It supports retrieving the info by the PV's name, or by the PVC's name
+// and namespace.
+func (v *VolumesInformation) retrievePvcPvInfo(pvName, pvcName, pvcNS string) *pvcPvInfo {
+	if pvName != "" {
+		if info, ok := v.pvMap[pvName]; ok {
+			return &info
+		}
+		return nil
+	}
+
+	if pvcNS == "" || pvcName == "" {
+		return nil
+	}
+
+	for _, info := range v.pvMap {
+		if pvcNS == info.PVCNamespace && pvcName == info.PVCName {
+			return &info
+		}
+	}
+
+	return nil
+}
diff --git a/internal/volume/volumes_information_test.go b/internal/volume/volumes_information_test.go
new file mode 100644
index 000000000..45051514d
--- /dev/null
+++ b/internal/volume/volumes_information_test.go
@@ -0,0 +1,856 @@
+/*
+Copyright The Velero Contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package volume + +import ( + "context" + "testing" + + snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/require" + corev1api "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" + "github.com/vmware-tanzu/velero/pkg/builder" + "github.com/vmware-tanzu/velero/pkg/itemoperation" + "github.com/vmware-tanzu/velero/pkg/plugin/velero" + velerotest "github.com/vmware-tanzu/velero/pkg/test" + "github.com/vmware-tanzu/velero/pkg/util/logging" + "github.com/vmware-tanzu/velero/pkg/volume" +) + +func TestGenerateVolumeInfoForSkippedPV(t *testing.T) { + tests := []struct { + name string + skippedPVName string + pvMap map[string]pvcPvInfo + expectedVolumeInfos []*VolumeInfo + }{ + { + name: "Cannot find info for PV", + skippedPVName: "testPV", + pvMap: map[string]pvcPvInfo{ + "velero/testPVC": { + PVCName: "testPVC", + PVCNamespace: "velero", + PV: corev1api.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPV", + Labels: map[string]string{"a": "b"}, + }, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete, + }, + }, + }, + }, + expectedVolumeInfos: []*VolumeInfo{}, + }, + { + name: "Normal Skipped PV info", + skippedPVName: "testPV", + pvMap: map[string]pvcPvInfo{ + "velero/testPVC": { + PVCName: "testPVC", + PVCNamespace: "velero", + PV: corev1api.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPV", + Labels: map[string]string{"a": "b"}, + }, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete, + }, + }, + }, + "testPV": { + PVCName: "testPVC", + PVCNamespace: "velero", + PV: corev1api.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPV", + Labels: map[string]string{"a": "b"}, + }, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete, + }, + }, + }, + }, + expectedVolumeInfos: []*VolumeInfo{ + { + PVCName: "testPVC", + PVCNamespace: "velero", + PVName: "testPV", + Skipped: true, + SkippedReason: "CSI: skipped for PodVolumeBackup", + PVInfo: PVInfo{ + ReclaimPolicy: "Delete", + Labels: map[string]string{ + "a": "b", + }, + }, + }, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + volumesInfo := VolumesInformation{} + volumesInfo.Init() + + if tc.skippedPVName != "" { + volumesInfo.SkippedPVs = map[string]string{ + tc.skippedPVName: "CSI: skipped for PodVolumeBackup", + } + } + + if tc.pvMap != nil { + for k, v := range tc.pvMap { + volumesInfo.pvMap[k] = v + } + } + volumesInfo.logger = logging.DefaultLogger(logrus.DebugLevel, logging.FormatJSON) + + volumesInfo.generateVolumeInfoForSkippedPV() + require.Equal(t, tc.expectedVolumeInfos, volumesInfo.volumeInfos) + }) + } +} + +func 
TestGenerateVolumeInfoForVeleroNativeSnapshot(t *testing.T) {
+	tests := []struct {
+		name string
+		nativeSnapshot volume.Snapshot
+		pvMap map[string]pvcPvInfo
+		expectedVolumeInfos []*VolumeInfo
+	}{
+		{
+			name: "Native snapshot's IOPS pointer is nil",
+			nativeSnapshot: volume.Snapshot{
+				Spec: volume.SnapshotSpec{
+					PersistentVolumeName: "testPV",
+					VolumeIOPS: nil,
+				},
+			},
+			expectedVolumeInfos: []*VolumeInfo{},
+		},
+		{
+			name: "Cannot find info for the PV",
+			nativeSnapshot: volume.Snapshot{
+				Spec: volume.SnapshotSpec{
+					PersistentVolumeName: "testPV",
+					VolumeIOPS: int64Ptr(100),
+				},
+			},
+			expectedVolumeInfos: []*VolumeInfo{},
+		},
+		{
+			name: "Cannot find PV info in pvMap",
+			pvMap: map[string]pvcPvInfo{
+				"velero/testPVC": {
+					PVCName: "testPVC",
+					PVCNamespace: "velero",
+					PV: corev1api.PersistentVolume{
+						ObjectMeta: metav1.ObjectMeta{
+							Name: "testPV",
+							Labels: map[string]string{"a": "b"},
+						},
+						Spec: corev1api.PersistentVolumeSpec{
+							PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete,
+						},
+					},
+				},
+			},
+			nativeSnapshot: volume.Snapshot{
+				Spec: volume.SnapshotSpec{
+					PersistentVolumeName: "testPV",
+					VolumeIOPS: int64Ptr(100),
+					VolumeType: "ssd",
+					VolumeAZ: "us-central1-a",
+				},
+				Status: volume.SnapshotStatus{
+					ProviderSnapshotID: "pvc-b31e3386-4bbb-4937-95d-7934cd62-b0a1-494b-95d7-0687440e8d0c",
+				},
+			},
+			expectedVolumeInfos: []*VolumeInfo{},
+		},
+		{
+			name: "Normal native snapshot",
+			pvMap: map[string]pvcPvInfo{
+				"testPV": {
+					PVCName: "testPVC",
+					PVCNamespace: "velero",
+					PV: corev1api.PersistentVolume{
+						ObjectMeta: metav1.ObjectMeta{
+							Name: "testPV",
+							Labels: map[string]string{"a": "b"},
+						},
+						Spec: corev1api.PersistentVolumeSpec{
+							PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete,
+						},
+					},
+				},
+			},
+			nativeSnapshot: volume.Snapshot{
+				Spec: volume.SnapshotSpec{
+					PersistentVolumeName: "testPV",
+					VolumeIOPS: int64Ptr(100),
+					VolumeType: "ssd",
+					VolumeAZ: "us-central1-a",
+				},
+				Status: volume.SnapshotStatus{
+					ProviderSnapshotID: "pvc-b31e3386-4bbb-4937-95d-7934cd62-b0a1-494b-95d7-0687440e8d0c",
+				},
+			},
+			expectedVolumeInfos: []*VolumeInfo{
+				{
+					PVCName: "testPVC",
+					PVCNamespace: "velero",
+					PVName: "testPV",
+					BackupMethod: NativeSnapshot,
+					PVInfo: PVInfo{
+						ReclaimPolicy: "Delete",
+						Labels: map[string]string{
+							"a": "b",
+						},
+					},
+					NativeSnapshotInfo: NativeSnapshotInfo{
+						SnapshotHandle: "pvc-b31e3386-4bbb-4937-95d-7934cd62-b0a1-494b-95d7-0687440e8d0c",
+						VolumeType: "ssd",
+						VolumeAZ: "us-central1-a",
+						IOPS: "100",
+					},
+				},
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			volumesInfo := VolumesInformation{}
+			volumesInfo.Init()
+			volumesInfo.NativeSnapshots = append(volumesInfo.NativeSnapshots, &tc.nativeSnapshot)
+			if tc.pvMap != nil {
+				for k, v := range tc.pvMap {
+					volumesInfo.pvMap[k] = v
+				}
+			}
+			volumesInfo.logger = logging.DefaultLogger(logrus.DebugLevel, logging.FormatJSON)
+
+			volumesInfo.generateVolumeInfoForVeleroNativeSnapshot()
+			require.Equal(t, tc.expectedVolumeInfos, volumesInfo.volumeInfos)
+		})
+	}
+}
+
+func TestGenerateVolumeInfoForCSIVolumeSnapshot(t *testing.T) {
+	resourceQuantity := resource.MustParse("100Gi")
+	now := metav1.Now()
+	tests := []struct {
+		name string
+		volumeSnapshot snapshotv1api.VolumeSnapshot
+		volumeSnapshotContent snapshotv1api.VolumeSnapshotContent
+		volumeSnapshotClass snapshotv1api.VolumeSnapshotClass
+		pvMap map[string]pvcPvInfo
+		operation *itemoperation.BackupOperation
+		expectedVolumeInfos []*VolumeInfo
+ }{ + { + name: "VS doesn't have VolumeSnapshotClass name", + volumeSnapshot: snapshotv1api.VolumeSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testVS", + Namespace: "velero", + }, + Spec: snapshotv1api.VolumeSnapshotSpec{}, + }, + expectedVolumeInfos: []*VolumeInfo{}, + }, + { + name: "VS doesn't have status", + volumeSnapshot: snapshotv1api.VolumeSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testVS", + Namespace: "velero", + }, + Spec: snapshotv1api.VolumeSnapshotSpec{ + VolumeSnapshotClassName: stringPtr("testClass"), + }, + }, + expectedVolumeInfos: []*VolumeInfo{}, + }, + { + name: "VS doesn't have PVC", + volumeSnapshot: snapshotv1api.VolumeSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testVS", + Namespace: "velero", + }, + Spec: snapshotv1api.VolumeSnapshotSpec{ + VolumeSnapshotClassName: stringPtr("testClass"), + }, + Status: &snapshotv1api.VolumeSnapshotStatus{ + BoundVolumeSnapshotContentName: stringPtr("testContent"), + }, + }, + expectedVolumeInfos: []*VolumeInfo{}, + }, + { + name: "Cannot find VSC for VS", + volumeSnapshot: snapshotv1api.VolumeSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testVS", + Namespace: "velero", + }, + Spec: snapshotv1api.VolumeSnapshotSpec{ + VolumeSnapshotClassName: stringPtr("testClass"), + Source: snapshotv1api.VolumeSnapshotSource{ + PersistentVolumeClaimName: stringPtr("testPVC"), + }, + }, + Status: &snapshotv1api.VolumeSnapshotStatus{ + BoundVolumeSnapshotContentName: stringPtr("testContent"), + }, + }, + expectedVolumeInfos: []*VolumeInfo{}, + }, + { + name: "Cannot find VolumeInfo for PVC", + volumeSnapshot: snapshotv1api.VolumeSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testVS", + Namespace: "velero", + }, + Spec: snapshotv1api.VolumeSnapshotSpec{ + VolumeSnapshotClassName: stringPtr("testClass"), + Source: snapshotv1api.VolumeSnapshotSource{ + PersistentVolumeClaimName: stringPtr("testPVC"), + }, + }, + Status: &snapshotv1api.VolumeSnapshotStatus{ + BoundVolumeSnapshotContentName: stringPtr("testContent"), + }, + }, + volumeSnapshotClass: *builder.ForVolumeSnapshotClass("testClass").Driver("pd.csi.storage.gke.io").Result(), + volumeSnapshotContent: *builder.ForVolumeSnapshotContent("testContent").Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: stringPtr("testSnapshotHandle")}).Result(), + expectedVolumeInfos: []*VolumeInfo{}, + }, + { + name: "Normal VolumeSnapshot case", + volumeSnapshot: snapshotv1api.VolumeSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testVS", + Namespace: "velero", + CreationTimestamp: now, + }, + Spec: snapshotv1api.VolumeSnapshotSpec{ + VolumeSnapshotClassName: stringPtr("testClass"), + Source: snapshotv1api.VolumeSnapshotSource{ + PersistentVolumeClaimName: stringPtr("testPVC"), + }, + }, + Status: &snapshotv1api.VolumeSnapshotStatus{ + BoundVolumeSnapshotContentName: stringPtr("testContent"), + RestoreSize: &resourceQuantity, + }, + }, + volumeSnapshotClass: *builder.ForVolumeSnapshotClass("testClass").Driver("pd.csi.storage.gke.io").Result(), + volumeSnapshotContent: *builder.ForVolumeSnapshotContent("testContent").Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: stringPtr("testSnapshotHandle")}).Result(), + pvMap: map[string]pvcPvInfo{ + "testPV": { + PVCName: "testPVC", + PVCNamespace: "velero", + PV: corev1api.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPV", + Labels: map[string]string{"a": "b"}, + }, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete, + }, 
+ }, + }, + }, + operation: &itemoperation.BackupOperation{ + Spec: itemoperation.BackupOperationSpec{ + OperationID: "testID", + ResourceIdentifier: velero.ResourceIdentifier{ + GroupResource: schema.GroupResource{ + Group: "snapshot.storage.k8s.io", + Resource: "volumesnapshots", + }, + Namespace: "velero", + Name: "testVS", + }, + }, + }, + expectedVolumeInfos: []*VolumeInfo{ + { + PVCName: "testPVC", + PVCNamespace: "velero", + PVName: "testPV", + BackupMethod: CSISnapshot, + OperationID: "testID", + StartTimestamp: &now, + PreserveLocalSnapshot: true, + CSISnapshotInfo: CSISnapshotInfo{ + Driver: "pd.csi.storage.gke.io", + SnapshotHandle: "testSnapshotHandle", + Size: 107374182400, + VSCName: "testContent", + }, + PVInfo: PVInfo{ + ReclaimPolicy: "Delete", + Labels: map[string]string{ + "a": "b", + }, + }, + }, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + volumesInfo := VolumesInformation{} + volumesInfo.Init() + + if tc.pvMap != nil { + for k, v := range tc.pvMap { + volumesInfo.pvMap[k] = v + } + } + + if tc.operation != nil { + volumesInfo.BackupOperations = append(volumesInfo.BackupOperations, tc.operation) + } + + volumesInfo.volumeSnapshots = []snapshotv1api.VolumeSnapshot{tc.volumeSnapshot} + volumesInfo.volumeSnapshotContents = []snapshotv1api.VolumeSnapshotContent{tc.volumeSnapshotContent} + volumesInfo.volumeSnapshotClasses = []snapshotv1api.VolumeSnapshotClass{tc.volumeSnapshotClass} + volumesInfo.logger = logging.DefaultLogger(logrus.DebugLevel, logging.FormatJSON) + + volumesInfo.generateVolumeInfoForCSIVolumeSnapshot() + require.Equal(t, tc.expectedVolumeInfos, volumesInfo.volumeInfos) + }) + } +} + +func TestGenerateVolumeInfoFromPVB(t *testing.T) { + tests := []struct { + name string + pvb *velerov1api.PodVolumeBackup + pod *corev1api.Pod + pvMap map[string]pvcPvInfo + expectedVolumeInfos []*VolumeInfo + }{ + { + name: "cannot find PVB's pod, should fail", + pvb: builder.ForPodVolumeBackup("velero", "testPVB").PodName("testPod").PodNamespace("velero").Result(), + expectedVolumeInfos: []*VolumeInfo{}, + }, + { + name: "PVB doesn't have a related PVC", + pvb: builder.ForPodVolumeBackup("velero", "testPVB").PodName("testPod").PodNamespace("velero").Result(), + pod: builder.ForPod("velero", "testPod").Containers(&corev1api.Container{ + Name: "test", + VolumeMounts: []corev1api.VolumeMount{ + { + Name: "testVolume", + MountPath: "/data", + }, + }, + }).Volumes( + &corev1api.Volume{ + Name: "", + VolumeSource: corev1api.VolumeSource{ + HostPath: &corev1api.HostPathVolumeSource{}, + }, + }, + ).Result(), + expectedVolumeInfos: []*VolumeInfo{ + { + PVCName: "", + PVCNamespace: "", + PVName: "", + BackupMethod: PodVolumeBackup, + PVBInfo: PodVolumeBackupInfo{ + PodName: "testPod", + PodNamespace: "velero", + }, + }, + }, + }, + { + name: "Backup doesn't have information for PVC", + pvb: builder.ForPodVolumeBackup("velero", "testPVB").PodName("testPod").PodNamespace("velero").Result(), + pod: builder.ForPod("velero", "testPod").Containers(&corev1api.Container{ + Name: "test", + VolumeMounts: []corev1api.VolumeMount{ + { + Name: "testVolume", + MountPath: "/data", + }, + }, + }).Volumes( + &corev1api.Volume{ + Name: "", + VolumeSource: corev1api.VolumeSource{ + PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{ + ClaimName: "testPVC", + }, + }, + }, + ).Result(), + expectedVolumeInfos: []*VolumeInfo{}, + }, + { + name: "PVB's volume has a PVC", + pvMap: map[string]pvcPvInfo{ + "testPV": { + PVCName: "testPVC", + 
PVCNamespace: "velero", + PV: corev1api.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPV", + Labels: map[string]string{"a": "b"}, + }, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete, + }, + }, + }, + }, + pvb: builder.ForPodVolumeBackup("velero", "testPVB").PodName("testPod").PodNamespace("velero").Result(), + pod: builder.ForPod("velero", "testPod").Containers(&corev1api.Container{ + Name: "test", + VolumeMounts: []corev1api.VolumeMount{ + { + Name: "testVolume", + MountPath: "/data", + }, + }, + }).Volumes( + &corev1api.Volume{ + Name: "", + VolumeSource: corev1api.VolumeSource{ + PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{ + ClaimName: "testPVC", + }, + }, + }, + ).Result(), + expectedVolumeInfos: []*VolumeInfo{ + { + PVCName: "testPVC", + PVCNamespace: "velero", + PVName: "testPV", + BackupMethod: PodVolumeBackup, + PVBInfo: PodVolumeBackupInfo{ + PodName: "testPod", + PodNamespace: "velero", + }, + PVInfo: PVInfo{ + ReclaimPolicy: string(corev1api.PersistentVolumeReclaimDelete), + Labels: map[string]string{"a": "b"}, + }, + }, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + volumesInfo := VolumesInformation{} + volumesInfo.Init() + volumesInfo.crClient = velerotest.NewFakeControllerRuntimeClient(t) + + volumesInfo.PodVolumeBackups = append(volumesInfo.PodVolumeBackups, tc.pvb) + + if tc.pvMap != nil { + for k, v := range tc.pvMap { + volumesInfo.pvMap[k] = v + } + } + if tc.pod != nil { + require.NoError(t, volumesInfo.crClient.Create(context.TODO(), tc.pod)) + } + volumesInfo.logger = logging.DefaultLogger(logrus.DebugLevel, logging.FormatJSON) + + volumesInfo.generateVolumeInfoFromPVB() + require.Equal(t, tc.expectedVolumeInfos, volumesInfo.volumeInfos) + }) + } +} + +func TestGenerateVolumeInfoFromDataUpload(t *testing.T) { + now := metav1.Now() + tests := []struct { + name string + volumeSnapshotClass *snapshotv1api.VolumeSnapshotClass + dataUpload *velerov2alpha1.DataUpload + operation *itemoperation.BackupOperation + pvMap map[string]pvcPvInfo + expectedVolumeInfos []*VolumeInfo + }{ + { + name: "Operation is not for PVC", + operation: &itemoperation.BackupOperation{ + Spec: itemoperation.BackupOperationSpec{ + ResourceIdentifier: velero.ResourceIdentifier{ + GroupResource: schema.GroupResource{ + Group: "", + Resource: "configmaps", + }, + }, + }, + }, + expectedVolumeInfos: []*VolumeInfo{}, + }, + { + name: "Operation doesn't have DataUpload PostItemOperation", + operation: &itemoperation.BackupOperation{ + Spec: itemoperation.BackupOperationSpec{ + ResourceIdentifier: velero.ResourceIdentifier{ + GroupResource: schema.GroupResource{ + Group: "", + Resource: "persistentvolumeclaims", + }, + Namespace: "velero", + Name: "testPVC", + }, + PostOperationItems: []velero.ResourceIdentifier{ + { + GroupResource: schema.GroupResource{ + Group: "", + Resource: "configmaps", + }, + }, + }, + }, + }, + expectedVolumeInfos: []*VolumeInfo{}, + }, + { + name: "DataUpload cannot be found for operation", + operation: &itemoperation.BackupOperation{ + Spec: itemoperation.BackupOperationSpec{ + OperationID: "testOperation", + ResourceIdentifier: velero.ResourceIdentifier{ + GroupResource: schema.GroupResource{ + Group: "", + Resource: "persistentvolumeclaims", + }, + Namespace: "velero", + Name: "testPVC", + }, + PostOperationItems: []velero.ResourceIdentifier{ + { + GroupResource: schema.GroupResource{ + Group: "velero.io", + Resource: "datauploads", 
+ }, + Namespace: "velero", + Name: "testDU", + }, + }, + }, + }, + expectedVolumeInfos: []*VolumeInfo{}, + }, + { + name: "VolumeSnapshotClass cannot be found for operation", + dataUpload: builder.ForDataUpload("velero", "testDU").DataMover("velero").CSISnapshot(&velerov2alpha1.CSISnapshotSpec{ + VolumeSnapshot: "testVS", + }).SnapshotID("testSnapshotHandle").Result(), + operation: &itemoperation.BackupOperation{ + Spec: itemoperation.BackupOperationSpec{ + OperationID: "testOperation", + ResourceIdentifier: velero.ResourceIdentifier{ + GroupResource: schema.GroupResource{ + Group: "", + Resource: "persistentvolumeclaims", + }, + Namespace: "velero", + Name: "testPVC", + }, + PostOperationItems: []velero.ResourceIdentifier{ + { + GroupResource: schema.GroupResource{ + Group: "velero.io", + Resource: "datauploads", + }, + Namespace: "velero", + Name: "testDU", + }, + }, + }, + }, + pvMap: map[string]pvcPvInfo{ + "testPV": { + PVCName: "testPVC", + PVCNamespace: "velero", + PV: corev1api.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPV", + Labels: map[string]string{"a": "b"}, + }, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete, + }, + }, + }, + }, + expectedVolumeInfos: []*VolumeInfo{ + { + PVCName: "testPVC", + PVCNamespace: "velero", + PVName: "testPV", + BackupMethod: CSISnapshot, + SnapshotDataMoved: true, + OperationID: "testOperation", + SnapshotDataMovementInfo: SnapshotDataMovementInfo{ + DataMover: "velero", + UploaderType: "kopia", + }, + PVInfo: PVInfo{ + ReclaimPolicy: string(corev1api.PersistentVolumeReclaimDelete), + Labels: map[string]string{"a": "b"}, + }, + }, + }, + }, + { + name: "Normal DataUpload case", + dataUpload: builder.ForDataUpload("velero", "testDU").DataMover("velero").CSISnapshot(&velerov2alpha1.CSISnapshotSpec{ + VolumeSnapshot: "testVS", + SnapshotClass: "testClass", + }).SnapshotID("testSnapshotHandle").Result(), + volumeSnapshotClass: builder.ForVolumeSnapshotClass("testClass").Driver("pd.csi.storage.gke.io").Result(), + operation: &itemoperation.BackupOperation{ + Spec: itemoperation.BackupOperationSpec{ + OperationID: "testOperation", + ResourceIdentifier: velero.ResourceIdentifier{ + GroupResource: schema.GroupResource{ + Group: "", + Resource: "persistentvolumeclaims", + }, + Namespace: "velero", + Name: "testPVC", + }, + PostOperationItems: []velero.ResourceIdentifier{ + { + GroupResource: schema.GroupResource{ + Group: "velero.io", + Resource: "datauploads", + }, + Namespace: "velero", + Name: "testDU", + }, + }, + }, + Status: itemoperation.OperationStatus{ + Created: &now, + }, + }, + pvMap: map[string]pvcPvInfo{ + "testPV": { + PVCName: "testPVC", + PVCNamespace: "velero", + PV: corev1api.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testPV", + Labels: map[string]string{"a": "b"}, + }, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete, + }, + }, + }, + }, + expectedVolumeInfos: []*VolumeInfo{ + { + PVCName: "testPVC", + PVCNamespace: "velero", + PVName: "testPV", + BackupMethod: CSISnapshot, + SnapshotDataMoved: true, + OperationID: "testOperation", + StartTimestamp: &now, + CSISnapshotInfo: CSISnapshotInfo{ + Driver: "pd.csi.storage.gke.io", + }, + SnapshotDataMovementInfo: SnapshotDataMovementInfo{ + DataMover: "velero", + UploaderType: "kopia", + }, + PVInfo: PVInfo{ + ReclaimPolicy: string(corev1api.PersistentVolumeReclaimDelete), + Labels: map[string]string{"a": "b"}, + }, + }, + }, + 
}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + volumesInfo := VolumesInformation{} + volumesInfo.Init() + + if tc.operation != nil { + volumesInfo.BackupOperations = append(volumesInfo.BackupOperations, tc.operation) + } + + if tc.pvMap != nil { + for k, v := range tc.pvMap { + volumesInfo.pvMap[k] = v + } + } + + volumesInfo.crClient = velerotest.NewFakeControllerRuntimeClient(t) + if tc.dataUpload != nil { + volumesInfo.crClient.Create(context.TODO(), tc.dataUpload) + } + + if tc.volumeSnapshotClass != nil { + volumesInfo.crClient.Create(context.TODO(), tc.volumeSnapshotClass) + } + + volumesInfo.logger = logging.DefaultLogger(logrus.DebugLevel, logging.FormatJSON) + + volumesInfo.generateVolumeInfoFromDataUpload() + require.Equal(t, tc.expectedVolumeInfos, volumesInfo.volumeInfos) + }) + } +} + +func stringPtr(str string) *string { + return &str +} + +func int64Ptr(val int) *int64 { + i := int64(val) + return &i +} diff --git a/pkg/backup/backup_test.go b/pkg/backup/backup_test.go index 6760d627b..66370abac 100644 --- a/pkg/backup/backup_test.go +++ b/pkg/backup/backup_test.go @@ -71,8 +71,8 @@ func TestBackedUpItemsMatchesTarballContents(t *testing.T) { req := &Request{ Backup: defaultBackup().Result(), SkippedPVTracker: NewSkipPVTracker(), - PVMap: map[string]PvcPvInfo{}, } + backupFile := bytes.NewBuffer([]byte{}) apiResources := []*test.APIResource{ @@ -1368,6 +1368,7 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) { "any": "whatever reason", }, }, + includedPVs: map[string]struct{}{}, }, }, apiResources: []*test.APIResource{ diff --git a/pkg/backup/item_backupper.go b/pkg/backup/item_backupper.go index 01258a4aa..40589ad70 100644 --- a/pkg/backup/item_backupper.go +++ b/pkg/backup/item_backupper.go @@ -384,8 +384,8 @@ func (ib *itemBackupper) executeActions( // snapshot was skipped by CSI plugin ib.trackSkippedPV(obj, groupResource, csiSnapshotApproach, "skipped b/c it's not a CSI volume", log) delete(u.GetAnnotations(), skippedNoCSIPVAnnotation) - } else if actionName == csiBIAPluginName || actionName == vsphereBIAPluginName { - // the snapshot has been taken + } else if (actionName == csiBIAPluginName || actionName == vsphereBIAPluginName) && !boolptr.IsSetToFalse(ib.backupRequest.Backup.Spec.SnapshotVolumes) { + // the snapshot has been taken by the BIA plugin ib.unTrackSkippedPV(obj, groupResource, log) } mustInclude := u.GetAnnotations()[mustIncludeAdditionalItemAnnotation] == "true" || finalize @@ -510,6 +510,7 @@ func (ib *itemBackupper) takePVSnapshot(obj runtime.Unstructured, log logrus.Fie if boolptr.IsSetToFalse(ib.backupRequest.Spec.SnapshotVolumes) { log.Info("Backup has volume snapshots disabled; skipping volume snapshot action.") + ib.trackSkippedPV(obj, kuberesource.PersistentVolumes, volumeSnapshotApproach, "backup has volume snapshots disabled", log) return nil } @@ -698,28 +699,15 @@ func (ib *itemBackupper) addVolumeInfo(obj runtime.Unstructured, log logrus.Fiel return err } - if ib.backupRequest.PVMap == nil { - ib.backupRequest.PVMap = make(map[string]PvcPvInfo) - } - pvcName := "" pvcNamespace := "" if pv.Spec.ClaimRef != nil { pvcName = pv.Spec.ClaimRef.Name pvcNamespace = pv.Spec.ClaimRef.Namespace - - ib.backupRequest.PVMap[pvcNamespace+"/"+pvcName] = PvcPvInfo{ - PVCName: pvcName, - PVCNamespace: pvcNamespace, - PV: *pv, - } } - ib.backupRequest.PVMap[pv.Name] = PvcPvInfo{ - PVCName: pvcName, - PVCNamespace: pvcNamespace, - PV: *pv, - } + ib.backupRequest.VolumesInformation.InsertPVMap(*pv, pvcName, pvcNamespace) + 
 	return nil
 }
diff --git a/pkg/backup/item_backupper_test.go b/pkg/backup/item_backupper_test.go
index 481092a0c..c6ea56547 100644
--- a/pkg/backup/item_backupper_test.go
+++ b/pkg/backup/item_backupper_test.go
@@ -24,7 +24,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	"github.com/vmware-tanzu/velero/pkg/kuberesource"
-	"github.com/vmware-tanzu/velero/pkg/volume"
 
 	"github.com/stretchr/testify/assert"
 	corev1api "k8s.io/api/core/v1"
@@ -242,36 +241,16 @@ func TestRandom(t *testing.T) {
 func TestAddVolumeInfo(t *testing.T) {
 	tests := []struct {
-		name               string
-		pv                 *corev1api.PersistentVolume
-		expectedVolumeInfo map[string]PvcPvInfo
+		name string
+		pv   *corev1api.PersistentVolume
 	}{
 		{
 			name: "PV has ClaimRef",
 			pv:   builder.ForPersistentVolume("testPV").ClaimRef("testNS", "testPVC").Result(),
-			expectedVolumeInfo: map[string]PvcPvInfo{
-				"testPV": {
-					PVCName:      "testPVC",
-					PVCNamespace: "testNS",
-					PV:           *builder.ForPersistentVolume("testPV").ClaimRef("testNS", "testPVC").Result(),
-				},
-				"testNS/testPVC": {
-					PVCName:      "testPVC",
-					PVCNamespace: "testNS",
-					PV:           *builder.ForPersistentVolume("testPV").ClaimRef("testNS", "testPVC").Result(),
-				},
-			},
 		},
 		{
 			name: "PV has no ClaimRef",
 			pv:   builder.ForPersistentVolume("testPV").Result(),
-			expectedVolumeInfo: map[string]PvcPvInfo{
-				"testPV": {
-					PVCName:      "",
-					PVCNamespace: "",
-					PV:           *builder.ForPersistentVolume("testPV").Result(),
-				},
-			},
 		},
 	}
 
@@ -279,7 +258,7 @@
 		t.Run(tc.name, func(t *testing.T) {
 			ib := itemBackupper{}
 			ib.backupRequest = new(Request)
-			ib.backupRequest.VolumeInfos.VolumeInfos = make([]volume.VolumeInfo, 0)
+			ib.backupRequest.VolumesInformation.Init()
 
 			pvObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pv)
 			require.NoError(t, err)
@@ -287,7 +266,6 @@
 			err = ib.addVolumeInfo(&unstructured.Unstructured{Object: pvObj}, logger)
 			require.NoError(t, err)
-			require.Equal(t, tc.expectedVolumeInfo, ib.backupRequest.PVMap)
 		})
 	}
 }
diff --git a/pkg/backup/pv_skip_tracker.go b/pkg/backup/pv_skip_tracker.go
index 64241a240..03f1a719d 100644
--- a/pkg/backup/pv_skip_tracker.go
+++ b/pkg/backup/pv_skip_tracker.go
@@ -29,6 +29,8 @@ type skipPVTracker struct {
 	// pvs is a map of name of the pv to the list of reasons why it is skipped.
 	// The reasons are stored in a map each key of the map is the backup approach, each approach can have one reason
 	pvs map[string]map[string]string
+	// includedPVs is the set of PVs included in the backup; an element of this set must not also appear in the "pvs" map
+	includedPVs map[string]struct{}
 }
 
 const (
@@ -40,8 +42,9 @@
 func NewSkipPVTracker() *skipPVTracker {
 	return &skipPVTracker{
-		RWMutex: &sync.RWMutex{},
-		pvs:     make(map[string]map[string]string),
+		RWMutex:     &sync.RWMutex{},
+		pvs:         make(map[string]map[string]string),
+		includedPVs: make(map[string]struct{}),
 	}
 }
 
@@ -52,9 +55,12 @@ func (pt *skipPVTracker) Track(name, approach, reason string) {
 	if name == "" || reason == "" {
 		return
 	}
+	if _, ok := pt.includedPVs[name]; ok {
+		return
+	}
 	skipReasons := pt.pvs[name]
 	if skipReasons == nil {
-		skipReasons = make(map[string]string, 0)
+		skipReasons = make(map[string]string)
 		pt.pvs[name] = skipReasons
 	}
 	if approach == "" {
@@ -64,9 +70,12 @@
 }
 
 // Untrack removes the pvc with the specified namespace and name.
+// This func should be called when a snapshot of the PV is taken, regardless of whether it is a native snapshot, a CSI snapshot, or a file-system backup;
+// therefore, once a PV is Untracked within one backup process, it will not be tracked again.
 func (pt *skipPVTracker) Untrack(name string) {
 	pt.Lock()
 	defer pt.Unlock()
+	pt.includedPVs[name] = struct{}{}
 	delete(pt.pvs, name)
 }
diff --git a/pkg/backup/pv_skip_tracker_test.go b/pkg/backup/pv_skip_tracker_test.go
index 16de8f555..8e75a808c 100644
--- a/pkg/backup/pv_skip_tracker_test.go
+++ b/pkg/backup/pv_skip_tracker_test.go
@@ -53,3 +53,12 @@ func TestSerializeSkipReasons(t *testing.T) {
 		require.Equal(t, "csiSnapshot: not applicable for CSI ;podvolume: it's set to opt-out;", skippedPV.SerializeSkipReasons())
 	}
 }
+
+func TestTrackUntrack(t *testing.T) {
+	// Once a PV is explicitly untracked, it cannot be Tracked again, because the PV is considered already backed up.
+	tracker := NewSkipPVTracker()
+	tracker.Track("pv3", podVolumeApproach, "it's set to opt-out")
+	tracker.Untrack("pv3")
+	tracker.Track("pv3", csiSnapshotApproach, "not applicable for CSI ")
+	assert.Equal(t, 0, len(tracker.Summary()))
+}
diff --git a/pkg/backup/request.go b/pkg/backup/request.go
index 6735c23a2..63013ca0d 100644
--- a/pkg/backup/request.go
+++ b/pkg/backup/request.go
@@ -20,10 +20,9 @@ import (
 	"fmt"
 	"sort"
 
-	corev1api "k8s.io/api/core/v1"
-
 	"github.com/vmware-tanzu/velero/internal/hook"
 	"github.com/vmware-tanzu/velero/internal/resourcepolicies"
+	internalVolume "github.com/vmware-tanzu/velero/internal/volume"
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/itemoperation"
 	"github.com/vmware-tanzu/velero/pkg/plugin/framework"
@@ -54,17 +53,11 @@ type Request struct {
 	itemOperationsList *[]*itemoperation.BackupOperation
 	ResPolicies        *resourcepolicies.Policies
 	SkippedPVTracker   *skipPVTracker
-	// A map contains the backup-included PV detail content.
-	// The key is PV name or PVC name(The format is PVC-namespace/PVC-name)
-	PVMap       map[string]PvcPvInfo
-	VolumeInfos volume.VolumeInfos
+	VolumesInformation internalVolume.VolumesInformation
 }
 
-type PvcPvInfo struct {
-	PVCName      string
-	PVCNamespace string
-	PV           corev1api.PersistentVolume
-}
+// VolumesInformation contains the information needed to generate
+// the backup VolumeInfo array.
 
 // GetItemOperationsList returns ItemOperationsList, initializing it if necessary
 func (r *Request) GetItemOperationsList() *[]*itemoperation.BackupOperation {
@@ -94,3 +87,17 @@ func (r *Request) BackupResourceList() map[string][]string {
 	return resources
 }
+
+func (r *Request) FillVolumesInformation() {
+	skippedPVMap := make(map[string]string)
+
+	for _, skippedPV := range r.SkippedPVTracker.Summary() {
+		skippedPVMap[skippedPV.Name] = skippedPV.SerializeSkipReasons()
+	}
+
+	r.VolumesInformation.SkippedPVs = skippedPVMap
+	r.VolumesInformation.NativeSnapshots = r.VolumeSnapshots
+	r.VolumesInformation.PodVolumeBackups = r.PodVolumeBackups
+	r.VolumesInformation.BackupOperations = *r.GetItemOperationsList()
+	r.VolumesInformation.BackupName = r.Backup.Name
+}
diff --git a/pkg/backup/request_test.go b/pkg/backup/request_test.go
index 9b04f0b53..e44bfbe68 100644
--- a/pkg/backup/request_test.go
+++ b/pkg/backup/request_test.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2019 the Velero contributors.
+Copyright the Velero contributors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
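The new includedPVs set changes the tracker's contract: once a PV has been Untracked (that is, its snapshot was actually taken), later Track calls for the same PV become no-ops, which is exactly what TestTrackUntrack above asserts. Below is a minimal, self-contained sketch of that contract; the names are illustrative stand-ins, not the unexported Velero types themselves.

```
package main

import (
	"fmt"
	"sync"
)

// tracker mimics the skipPVTracker contract described above; illustrative only.
type tracker struct {
	mu       sync.Mutex
	skipped  map[string]map[string]string // PV name -> approach -> skip reason
	included map[string]struct{}          // PVs whose snapshot was already taken
}

func newTracker() *tracker {
	return &tracker{
		skipped:  make(map[string]map[string]string),
		included: make(map[string]struct{}),
	}
}

// Track records a skip reason, unless the PV was already included.
func (t *tracker) Track(name, approach, reason string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if _, ok := t.included[name]; ok {
		return // the PV is considered backed up; ignore late skip reasons
	}
	if t.skipped[name] == nil {
		t.skipped[name] = make(map[string]string)
	}
	t.skipped[name][approach] = reason
}

// Untrack marks the PV as included and clears any recorded skip reasons.
func (t *tracker) Untrack(name string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.included[name] = struct{}{}
	delete(t.skipped, name)
}

func main() {
	tr := newTracker()
	tr.Track("pv3", "podvolume", "it's set to opt-out")
	tr.Untrack("pv3")                                // snapshot taken
	tr.Track("pv3", "csiSnapshot", "not applicable") // ignored: pv3 is included
	fmt.Println(len(tr.skipped))                     // prints 0
}
```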
diff --git a/pkg/backup/snapshots.go b/pkg/backup/snapshots.go index e9724b9e3..fdfd22cf9 100644 --- a/pkg/backup/snapshots.go +++ b/pkg/backup/snapshots.go @@ -1,3 +1,19 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package backup import ( diff --git a/pkg/builder/pod_builder.go b/pkg/builder/pod_builder.go index 886d7a411..50f8f5e51 100644 --- a/pkg/builder/pod_builder.go +++ b/pkg/builder/pod_builder.go @@ -106,3 +106,8 @@ func (b *PodBuilder) Phase(phase corev1api.PodPhase) *PodBuilder { b.object.Status.Phase = phase return b } + +func (b *PodBuilder) Status(status corev1api.PodStatus) *PodBuilder { + b.object.Status = status + return b +} diff --git a/pkg/cmd/util/downloadrequest/downloadrequest.go b/pkg/cmd/util/downloadrequest/downloadrequest.go index 9595c7625..12df30f32 100644 --- a/pkg/cmd/util/downloadrequest/downloadrequest.go +++ b/pkg/cmd/util/downloadrequest/downloadrequest.go @@ -123,7 +123,7 @@ func Stream(ctx context.Context, kbClient kbclient.Client, namespace, name strin ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, } - httpReq, err := http.NewRequestWithContext(ctx, http.MethodGet, created.Status.DownloadURL, nil) + httpReq, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, created.Status.DownloadURL, nil) if err != nil { return err } diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go index bfbacc541..b9e331b3b 100644 --- a/pkg/controller/backup_controller.go +++ b/pkg/controller/backup_controller.go @@ -21,7 +21,6 @@ import ( "context" "fmt" "os" - "strconv" "strings" "time" @@ -41,19 +40,16 @@ import ( "github.com/vmware-tanzu/velero/internal/credentials" "github.com/vmware-tanzu/velero/internal/resourcepolicies" "github.com/vmware-tanzu/velero/internal/storage" + internalVolume "github.com/vmware-tanzu/velero/internal/volume" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" pkgbackup "github.com/vmware-tanzu/velero/pkg/backup" "github.com/vmware-tanzu/velero/pkg/discovery" "github.com/vmware-tanzu/velero/pkg/features" - "github.com/vmware-tanzu/velero/pkg/itemoperation" - "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/label" "github.com/vmware-tanzu/velero/pkg/metrics" "github.com/vmware-tanzu/velero/pkg/persistence" "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt" "github.com/vmware-tanzu/velero/pkg/plugin/framework" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/collections" "github.com/vmware-tanzu/velero/pkg/util/encode" @@ -319,8 +315,8 @@ func (b *backupReconciler) prepareBackupRequest(backup *velerov1api.Backup, logg request := &pkgbackup.Request{ Backup: backup.DeepCopy(), // don't modify items in the cache SkippedPVTracker: pkgbackup.NewSkipPVTracker(), - PVMap: map[string]pkgbackup.PvcPvInfo{}, } + request.VolumesInformation.Init() // 
set backup major version - deprecated, use Status.FormatVersion request.Status.Version = pkgbackup.BackupVersion @@ -591,7 +587,7 @@ func (b *backupReconciler) validateAndGetSnapshotLocations(backup *velerov1api.B // add credential to config for each location for _, location := range providerLocations { - err = volume.UpdateVolumeSnapshotLocationWithCredentialConfig(location, b.credentialFileStore) + err = internalVolume.UpdateVolumeSnapshotLocationWithCredentialConfig(location, b.credentialFileStore) if err != nil { errors = append(errors, fmt.Sprintf("error adding credentials to volume snapshot location named %s: %v", location.Name, err)) continue @@ -737,9 +733,7 @@ func (b *backupReconciler) runBackup(backup *pkgbackup.Request) error { if logFile, err := backupLog.GetPersistFile(); err != nil { fatalErrs = append(fatalErrs, errors.Wrap(err, "error getting backup log file")) } else { - backup.VolumeInfos.VolumeInfos = generateVolumeInfo(backup, volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses, b.globalCRClient, backupLog) - - if errs := persistBackup(backup, backupFile, logFile, backupStore, volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses, results); len(errs) > 0 { + if errs := persistBackup(backup, backupFile, logFile, backupStore, volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses, results, b.globalCRClient, backupLog); len(errs) > 0 { fatalErrs = append(fatalErrs, errs...) } } @@ -796,8 +790,10 @@ func persistBackup(backup *pkgbackup.Request, backupStore persistence.BackupStore, csiVolumeSnapshots []snapshotv1api.VolumeSnapshot, csiVolumeSnapshotContents []snapshotv1api.VolumeSnapshotContent, - csiVolumesnapshotClasses []snapshotv1api.VolumeSnapshotClass, + csiVolumeSnapshotClasses []snapshotv1api.VolumeSnapshotClass, results map[string]results.Result, + crClient kbclient.Client, + logger logrus.FieldLogger, ) []error { persistErrs := []error{} backupJSON := new(bytes.Buffer) @@ -832,7 +828,7 @@ func persistBackup(backup *pkgbackup.Request, if errs != nil { persistErrs = append(persistErrs, errs...) } - csiSnapshotClassesJSON, errs := encode.ToJSONGzip(csiVolumesnapshotClasses, "csi volume snapshot classes list") + csiSnapshotClassesJSON, errs := encode.ToJSONGzip(csiVolumeSnapshotClasses, "csi volume snapshot classes list") if errs != nil { persistErrs = append(persistErrs, errs...) } @@ -847,7 +843,15 @@ func persistBackup(backup *pkgbackup.Request, persistErrs = append(persistErrs, errs...) } - volumeInfoJSON, errs := encode.ToJSONGzip(backup.VolumeInfos, "backup volumes information") + backup.FillVolumesInformation() + + volumeInfoJSON, errs := encode.ToJSONGzip(backup.VolumesInformation.Result( + csiVolumeSnapshots, + csiVolumeSnapshotContents, + csiVolumeSnapshotClasses, + crClient, + logger, + ), "backup volumes information") if errs != nil { persistErrs = append(persistErrs, errs...) 
} @@ -912,328 +916,3 @@ func oldAndNewFilterParametersUsedTogether(backupSpec velerov1api.BackupSpec) bo return haveOldResourceFilterParameters && haveNewResourceFilterParameters } - -func generateVolumeInfo(backup *pkgbackup.Request, csiVolumeSnapshots []snapshotv1api.VolumeSnapshot, - csiVolumeSnapshotContents []snapshotv1api.VolumeSnapshotContent, csiVolumesnapshotClasses []snapshotv1api.VolumeSnapshotClass, - crClient kbclient.Client, logger logrus.FieldLogger) []volume.VolumeInfo { - volumeInfos := make([]volume.VolumeInfo, 0) - - skippedVolumeInfos := generateVolumeInfoForSkippedPV(backup, logger) - volumeInfos = append(volumeInfos, skippedVolumeInfos...) - - nativeSnapshotVolumeInfos := generateVolumeInfoForVeleroNativeSnapshot(backup, logger) - volumeInfos = append(volumeInfos, nativeSnapshotVolumeInfos...) - - csiVolumeInfos := generateVolumeInfoForCSIVolumeSnapshot(backup, csiVolumeSnapshots, csiVolumeSnapshotContents, csiVolumesnapshotClasses, logger) - volumeInfos = append(volumeInfos, csiVolumeInfos...) - - pvbVolumeInfos := generateVolumeInfoFromPVB(backup, crClient, logger) - volumeInfos = append(volumeInfos, pvbVolumeInfos...) - - dataUploadVolumeInfos := generateVolumeInfoFromDataUpload(backup, crClient, logger) - volumeInfos = append(volumeInfos, dataUploadVolumeInfos...) - - return volumeInfos -} - -// generateVolumeInfoForSkippedPV generate VolumeInfos for SkippedPV. -func generateVolumeInfoForSkippedPV(backup *pkgbackup.Request, logger logrus.FieldLogger) []volume.VolumeInfo { - tmpVolumeInfos := make([]volume.VolumeInfo, 0) - - for _, skippedPV := range backup.SkippedPVTracker.Summary() { - if pvcPVInfo, ok := backup.PVMap[skippedPV.Name]; ok { - volumeInfo := volume.VolumeInfo{ - PVCName: pvcPVInfo.PVCName, - PVCNamespace: pvcPVInfo.PVCNamespace, - PVName: skippedPV.Name, - SnapshotDataMoved: false, - Skipped: true, - SkippedReason: skippedPV.SerializeSkipReasons(), - PVInfo: volume.PVInfo{ - ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy), - Labels: pvcPVInfo.PV.Labels, - }, - } - tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo) - } else { - logger.Warnf("Cannot find info for PV %s", skippedPV.Name) - continue - } - } - - return tmpVolumeInfos -} - -// generateVolumeInfoForVeleroNativeSnapshot generate VolumeInfos for Velero native snapshot -func generateVolumeInfoForVeleroNativeSnapshot(backup *pkgbackup.Request, logger logrus.FieldLogger) []volume.VolumeInfo { - tmpVolumeInfos := make([]volume.VolumeInfo, 0) - - for _, nativeSnapshot := range backup.VolumeSnapshots { - var iops int64 - if nativeSnapshot.Spec.VolumeIOPS != nil { - iops = *nativeSnapshot.Spec.VolumeIOPS - } - - if pvcPVInfo, ok := backup.PVMap[nativeSnapshot.Spec.PersistentVolumeName]; ok { - volumeInfo := volume.VolumeInfo{ - BackupMethod: volume.NativeSnapshot, - PVCName: pvcPVInfo.PVCName, - PVCNamespace: pvcPVInfo.PVCNamespace, - PVName: pvcPVInfo.PV.Name, - SnapshotDataMoved: false, - Skipped: false, - NativeSnapshotInfo: volume.NativeSnapshotInfo{ - SnapshotHandle: nativeSnapshot.Status.ProviderSnapshotID, - VolumeType: nativeSnapshot.Spec.VolumeType, - VolumeAZ: nativeSnapshot.Spec.VolumeAZ, - IOPS: strconv.FormatInt(iops, 10), - }, - PVInfo: volume.PVInfo{ - ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy), - Labels: pvcPVInfo.PV.Labels, - }, - } - - tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo) - } else { - logger.Warnf("cannot find info for PV %s", nativeSnapshot.Spec.PersistentVolumeName) - continue - } - } - - return tmpVolumeInfos -} 
- -// generateVolumeInfoForCSIVolumeSnapshot generate VolumeInfos for CSI VolumeSnapshot -func generateVolumeInfoForCSIVolumeSnapshot(backup *pkgbackup.Request, csiVolumeSnapshots []snapshotv1api.VolumeSnapshot, - csiVolumeSnapshotContents []snapshotv1api.VolumeSnapshotContent, csiVolumesnapshotClasses []snapshotv1api.VolumeSnapshotClass, - logger logrus.FieldLogger) []volume.VolumeInfo { - tmpVolumeInfos := make([]volume.VolumeInfo, 0) - - for _, volumeSnapshot := range csiVolumeSnapshots { - var volumeSnapshotClass *snapshotv1api.VolumeSnapshotClass - var volumeSnapshotContent *snapshotv1api.VolumeSnapshotContent - - // This is protective logic. The passed-in VS should be all related - // to this backup. - if volumeSnapshot.Labels[velerov1api.BackupNameLabel] != backup.Name { - continue - } - - if volumeSnapshot.Spec.VolumeSnapshotClassName == nil { - logger.Warnf("Cannot find VolumeSnapshotClass for VolumeSnapshot %s/%s", volumeSnapshot.Namespace, volumeSnapshot.Name) - continue - } - - if volumeSnapshot.Status == nil || volumeSnapshot.Status.BoundVolumeSnapshotContentName == nil { - logger.Warnf("Cannot fine VolumeSnapshotContent for VolumeSnapshot %s/%s", volumeSnapshot.Namespace, volumeSnapshot.Name) - continue - } - - if volumeSnapshot.Spec.Source.PersistentVolumeClaimName == nil { - logger.Warnf("VolumeSnapshot %s/%s doesn't have a source PVC", volumeSnapshot.Namespace, volumeSnapshot.Name) - continue - } - - for index := range csiVolumesnapshotClasses { - if *volumeSnapshot.Spec.VolumeSnapshotClassName == csiVolumesnapshotClasses[index].Name { - volumeSnapshotClass = &csiVolumesnapshotClasses[index] - } - } - - for index := range csiVolumeSnapshotContents { - if *volumeSnapshot.Status.BoundVolumeSnapshotContentName == csiVolumeSnapshotContents[index].Name { - volumeSnapshotContent = &csiVolumeSnapshotContents[index] - } - } - - if volumeSnapshotClass == nil || volumeSnapshotContent == nil { - logger.Warnf("fail to get VolumeSnapshotContent or VolumeSnapshotClass for VolumeSnapshot: %s/%s", - volumeSnapshot.Namespace, volumeSnapshot.Name) - continue - } - - var operation itemoperation.BackupOperation - for _, op := range *backup.GetItemOperationsList() { - if op.Spec.ResourceIdentifier.GroupResource.String() == kuberesource.VolumeSnapshots.String() && - op.Spec.ResourceIdentifier.Name == volumeSnapshot.Name && - op.Spec.ResourceIdentifier.Namespace == volumeSnapshot.Namespace { - operation = *op - } - } - - var size int64 - if volumeSnapshot.Status.RestoreSize != nil { - size = volumeSnapshot.Status.RestoreSize.Value() - } - snapshotHandle := "" - if volumeSnapshotContent.Status.SnapshotHandle != nil { - snapshotHandle = *volumeSnapshotContent.Status.SnapshotHandle - } - if pvcPVInfo, ok := backup.PVMap[volumeSnapshot.Namespace+"/"+*volumeSnapshot.Spec.Source.PersistentVolumeClaimName]; ok { - volumeInfo := volume.VolumeInfo{ - BackupMethod: volume.CSISnapshot, - PVCName: pvcPVInfo.PVCName, - PVCNamespace: pvcPVInfo.PVCNamespace, - PVName: pvcPVInfo.PV.Name, - Skipped: false, - SnapshotDataMoved: false, - PreserveLocalSnapshot: true, - OperationID: operation.Spec.OperationID, - StartTimestamp: &(volumeSnapshot.CreationTimestamp), - CSISnapshotInfo: volume.CSISnapshotInfo{ - VSCName: *volumeSnapshot.Status.BoundVolumeSnapshotContentName, - Size: size, - Driver: volumeSnapshotClass.Driver, - SnapshotHandle: snapshotHandle, - }, - PVInfo: volume.PVInfo{ - ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy), - Labels: pvcPVInfo.PV.Labels, - }, - } - - tmpVolumeInfos 
= append(tmpVolumeInfos, volumeInfo) - } else { - logger.Warnf("cannot find info for PVC %s/%s", volumeSnapshot.Namespace, volumeSnapshot.Spec.Source.PersistentVolumeClaimName) - continue - } - } - - return tmpVolumeInfos -} - -// generateVolumeInfoFromPVB generate VolumeInfo for PVB. -func generateVolumeInfoFromPVB(backup *pkgbackup.Request, crClient kbclient.Client, logger logrus.FieldLogger) []volume.VolumeInfo { - tmpVolumeInfos := make([]volume.VolumeInfo, 0) - - for _, pvb := range backup.PodVolumeBackups { - volumeInfo := volume.VolumeInfo{ - BackupMethod: volume.PodVolumeBackup, - SnapshotDataMoved: false, - Skipped: false, - StartTimestamp: pvb.Status.StartTimestamp, - PVBInfo: volume.PodVolumeBackupInfo{ - SnapshotHandle: pvb.Status.SnapshotID, - Size: pvb.Status.Progress.TotalBytes, - UploaderType: pvb.Spec.UploaderType, - VolumeName: pvb.Spec.Volume, - PodName: pvb.Spec.Pod.Name, - PodNamespace: pvb.Spec.Pod.Namespace, - NodeName: pvb.Spec.Node, - }, - } - - pod := new(corev1api.Pod) - pvcName := "" - err := crClient.Get(context.TODO(), kbclient.ObjectKey{Namespace: pvb.Spec.Pod.Namespace, Name: pvb.Spec.Pod.Name}, pod) - if err != nil { - logger.WithError(err).Warn("Fail to get pod for PodVolumeBackup: ", pvb.Name) - continue - } - for _, volume := range pod.Spec.Volumes { - if volume.Name == pvb.Spec.Volume && volume.PersistentVolumeClaim != nil { - pvcName = volume.PersistentVolumeClaim.ClaimName - } - } - - if pvcName != "" { - if pvcPVInfo, ok := backup.PVMap[pod.Namespace+"/"+pvcName]; ok { - volumeInfo.PVCName = pvcPVInfo.PVCName - volumeInfo.PVCNamespace = pvcPVInfo.PVCNamespace - volumeInfo.PVName = pvcPVInfo.PV.Name - volumeInfo.PVInfo = volume.PVInfo{ - ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy), - Labels: pvcPVInfo.PV.Labels, - } - } else { - logger.Warnf("Cannot find info for PVC %s/%s", pod.Namespace, pvcName) - continue - } - } else { - logger.Debug("The PVB %s doesn't have a corresponding PVC", pvb.Name) - } - - tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo) - } - - return tmpVolumeInfos -} - -// generateVolumeInfoFromDataUpload generate VolumeInfo for DataUpload. 
-func generateVolumeInfoFromDataUpload(backup *pkgbackup.Request, crClient kbclient.Client, logger logrus.FieldLogger) []volume.VolumeInfo { - tmpVolumeInfos := make([]volume.VolumeInfo, 0) - vsClassList := new(snapshotv1api.VolumeSnapshotClassList) - if err := crClient.List(context.TODO(), vsClassList); err != nil { - logger.WithError(err).Errorf("cannot list VolumeSnapshotClass %s", err.Error()) - return tmpVolumeInfos - } - - for _, operation := range *backup.GetItemOperationsList() { - if operation.Spec.ResourceIdentifier.GroupResource.String() == kuberesource.PersistentVolumeClaims.String() { - var duIdentifier velero.ResourceIdentifier - - for _, identifier := range operation.Spec.PostOperationItems { - if identifier.GroupResource.String() == "datauploads.velero.io" { - duIdentifier = identifier - } - } - if duIdentifier.Empty() { - logger.Warnf("cannot find DataUpload for PVC %s/%s backup async operation", - operation.Spec.ResourceIdentifier.Namespace, operation.Spec.ResourceIdentifier.Name) - continue - } - - dataUpload := new(velerov2alpha1.DataUpload) - err := crClient.Get( - context.TODO(), - kbclient.ObjectKey{ - Namespace: duIdentifier.Namespace, - Name: duIdentifier.Name}, - dataUpload, - ) - if err != nil { - logger.Warnf("fail to get DataUpload for operation %s: %s", operation.Spec.OperationID, err.Error()) - continue - } - - driverUsedByVSClass := "" - for index := range vsClassList.Items { - if vsClassList.Items[index].Name == dataUpload.Spec.CSISnapshot.SnapshotClass { - driverUsedByVSClass = vsClassList.Items[index].Driver - } - } - - if pvcPVInfo, ok := backup.PVMap[operation.Spec.ResourceIdentifier.Namespace+"/"+operation.Spec.ResourceIdentifier.Name]; ok { - volumeInfo := volume.VolumeInfo{ - BackupMethod: volume.CSISnapshot, - PVCName: pvcPVInfo.PVCName, - PVCNamespace: pvcPVInfo.PVCNamespace, - PVName: pvcPVInfo.PV.Name, - SnapshotDataMoved: true, - Skipped: false, - OperationID: operation.Spec.OperationID, - StartTimestamp: operation.Status.Created, - CSISnapshotInfo: volume.CSISnapshotInfo{ - Driver: driverUsedByVSClass, - }, - SnapshotDataMovementInfo: volume.SnapshotDataMovementInfo{ - DataMover: dataUpload.Spec.DataMover, - UploaderType: "kopia", - }, - PVInfo: volume.PVInfo{ - ReclaimPolicy: string(pvcPVInfo.PV.Spec.PersistentVolumeReclaimPolicy), - Labels: pvcPVInfo.PV.Labels, - }, - } - - tmpVolumeInfos = append(tmpVolumeInfos, volumeInfo) - } else { - logger.Warnf("Cannot find info for PVC %s/%s", operation.Spec.ResourceIdentifier.Namespace, operation.Spec.ResourceIdentifier.Name) - continue - } - } - } - - return tmpVolumeInfos -} diff --git a/pkg/controller/backup_controller_test.go b/pkg/controller/backup_controller_test.go index 736209e4a..0ec7175bc 100644 --- a/pkg/controller/backup_controller_test.go +++ b/pkg/controller/backup_controller_test.go @@ -34,26 +34,17 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - corev1api "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/version" "k8s.io/utils/clock" testclocks "k8s.io/utils/clock/testing" ctrl "sigs.k8s.io/controller-runtime" kbclient "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/vmware-tanzu/velero/pkg/backup" - kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube" - "github.com/vmware-tanzu/velero/pkg/volume" - 
fakeClient "sigs.k8s.io/controller-runtime/pkg/client/fake" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" pkgbackup "github.com/vmware-tanzu/velero/pkg/backup" "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/discovery" @@ -65,10 +56,10 @@ import ( "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt" "github.com/vmware-tanzu/velero/pkg/plugin/framework" pluginmocks "github.com/vmware-tanzu/velero/pkg/plugin/mocks" - "github.com/vmware-tanzu/velero/pkg/plugin/velero" biav2 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v2" velerotest "github.com/vmware-tanzu/velero/pkg/test" "github.com/vmware-tanzu/velero/pkg/util/boolptr" + kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube" "github.com/vmware-tanzu/velero/pkg/util/logging" ) @@ -1742,749 +1733,3 @@ func TestPatchResourceWorksWithStatus(t *testing.T) { } } -func TestGenerateVolumeInfoForSkippedPV(t *testing.T) { - tests := []struct { - name string - skippedPVName string - pvMap map[string]backup.PvcPvInfo - expectedVolumeInfos []volume.VolumeInfo - }{ - { - name: "Cannot find info for PV", - skippedPVName: "testPV", - expectedVolumeInfos: []volume.VolumeInfo{}, - }, - { - name: "Normal Skipped PV info", - skippedPVName: "testPV", - pvMap: map[string]backup.PvcPvInfo{ - "velero/testPVC": { - PVCName: "testPVC", - PVCNamespace: "velero", - PV: corev1api.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testPV", - Labels: map[string]string{"a": "b"}, - }, - Spec: corev1api.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete, - }, - }, - }, - "testPV": { - PVCName: "testPVC", - PVCNamespace: "velero", - PV: corev1api.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testPV", - Labels: map[string]string{"a": "b"}, - }, - Spec: corev1api.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete, - }, - }, - }, - }, - expectedVolumeInfos: []volume.VolumeInfo{ - { - PVCName: "testPVC", - PVCNamespace: "velero", - PVName: "testPV", - Skipped: true, - SkippedReason: "CSI: skipped for PodVolumeBackup;", - PVInfo: volume.PVInfo{ - ReclaimPolicy: "Delete", - Labels: map[string]string{ - "a": "b", - }, - }, - }, - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - request := new(backup.Request) - request.SkippedPVTracker = backup.NewSkipPVTracker() - if tc.skippedPVName != "" { - request.SkippedPVTracker.Track(tc.skippedPVName, "CSI", "skipped for PodVolumeBackup") - } - if tc.pvMap != nil { - request.PVMap = tc.pvMap - } - logger := logging.DefaultLogger(logrus.DebugLevel, logging.FormatJSON) - - volumeInfos := generateVolumeInfoForSkippedPV(request, logger) - require.Equal(t, tc.expectedVolumeInfos, volumeInfos) - }) - } -} - -func TestGenerateVolumeInfoForCSIVolumeSnapshot(t *testing.T) { - resourceQuantity := resource.MustParse("100Gi") - now := metav1.Now() - tests := []struct { - name string - volumeSnapshot snapshotv1api.VolumeSnapshot - volumeSnapshotContent snapshotv1api.VolumeSnapshotContent - volumeSnapshotClass snapshotv1api.VolumeSnapshotClass - pvMap map[string]backup.PvcPvInfo - operation *itemoperation.BackupOperation - expectedVolumeInfos []volume.VolumeInfo - }{ - { - name: "VS doesn't have VolumeSnapshotClass name", - volumeSnapshot: snapshotv1api.VolumeSnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testVS", - Namespace: "velero", - }, - Spec: 
snapshotv1api.VolumeSnapshotSpec{}, - }, - expectedVolumeInfos: []volume.VolumeInfo{}, - }, - { - name: "VS doesn't have status", - volumeSnapshot: snapshotv1api.VolumeSnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testVS", - Namespace: "velero", - }, - Spec: snapshotv1api.VolumeSnapshotSpec{ - VolumeSnapshotClassName: stringPtr("testClass"), - }, - }, - expectedVolumeInfos: []volume.VolumeInfo{}, - }, - { - name: "VS doesn't have PVC", - volumeSnapshot: snapshotv1api.VolumeSnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testVS", - Namespace: "velero", - }, - Spec: snapshotv1api.VolumeSnapshotSpec{ - VolumeSnapshotClassName: stringPtr("testClass"), - }, - Status: &snapshotv1api.VolumeSnapshotStatus{ - BoundVolumeSnapshotContentName: stringPtr("testContent"), - }, - }, - expectedVolumeInfos: []volume.VolumeInfo{}, - }, - { - name: "Cannot find VSC for VS", - volumeSnapshot: snapshotv1api.VolumeSnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testVS", - Namespace: "velero", - }, - Spec: snapshotv1api.VolumeSnapshotSpec{ - VolumeSnapshotClassName: stringPtr("testClass"), - Source: snapshotv1api.VolumeSnapshotSource{ - PersistentVolumeClaimName: stringPtr("testPVC"), - }, - }, - Status: &snapshotv1api.VolumeSnapshotStatus{ - BoundVolumeSnapshotContentName: stringPtr("testContent"), - }, - }, - expectedVolumeInfos: []volume.VolumeInfo{}, - }, - { - name: "Cannot find VolumeInfo for PVC", - volumeSnapshot: snapshotv1api.VolumeSnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testVS", - Namespace: "velero", - }, - Spec: snapshotv1api.VolumeSnapshotSpec{ - VolumeSnapshotClassName: stringPtr("testClass"), - Source: snapshotv1api.VolumeSnapshotSource{ - PersistentVolumeClaimName: stringPtr("testPVC"), - }, - }, - Status: &snapshotv1api.VolumeSnapshotStatus{ - BoundVolumeSnapshotContentName: stringPtr("testContent"), - }, - }, - volumeSnapshotClass: *builder.ForVolumeSnapshotClass("testClass").Driver("pd.csi.storage.gke.io").Result(), - volumeSnapshotContent: *builder.ForVolumeSnapshotContent("testContent").Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: stringPtr("testSnapshotHandle")}).Result(), - expectedVolumeInfos: []volume.VolumeInfo{}, - }, - { - name: "Normal VolumeSnapshot case", - volumeSnapshot: snapshotv1api.VolumeSnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testVS", - Namespace: "velero", - CreationTimestamp: now, - }, - Spec: snapshotv1api.VolumeSnapshotSpec{ - VolumeSnapshotClassName: stringPtr("testClass"), - Source: snapshotv1api.VolumeSnapshotSource{ - PersistentVolumeClaimName: stringPtr("testPVC"), - }, - }, - Status: &snapshotv1api.VolumeSnapshotStatus{ - BoundVolumeSnapshotContentName: stringPtr("testContent"), - RestoreSize: &resourceQuantity, - }, - }, - volumeSnapshotClass: *builder.ForVolumeSnapshotClass("testClass").Driver("pd.csi.storage.gke.io").Result(), - volumeSnapshotContent: *builder.ForVolumeSnapshotContent("testContent").Status(&snapshotv1api.VolumeSnapshotContentStatus{SnapshotHandle: stringPtr("testSnapshotHandle")}).Result(), - pvMap: map[string]backup.PvcPvInfo{ - "velero/testPVC": { - PVCName: "testPVC", - PVCNamespace: "velero", - PV: corev1api.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testPV", - Labels: map[string]string{"a": "b"}, - }, - Spec: corev1api.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete, - }, - }, - }, - }, - operation: &itemoperation.BackupOperation{ - Spec: itemoperation.BackupOperationSpec{ - OperationID: "testID", - ResourceIdentifier: 
velero.ResourceIdentifier{ - GroupResource: schema.GroupResource{ - Group: "snapshot.storage.k8s.io", - Resource: "volumesnapshots", - }, - Namespace: "velero", - Name: "testVS", - }, - }, - }, - expectedVolumeInfos: []volume.VolumeInfo{ - { - PVCName: "testPVC", - PVCNamespace: "velero", - PVName: "testPV", - BackupMethod: volume.CSISnapshot, - OperationID: "testID", - StartTimestamp: &now, - PreserveLocalSnapshot: true, - CSISnapshotInfo: volume.CSISnapshotInfo{ - Driver: "pd.csi.storage.gke.io", - SnapshotHandle: "testSnapshotHandle", - Size: 107374182400, - VSCName: "testContent", - }, - PVInfo: volume.PVInfo{ - ReclaimPolicy: "Delete", - Labels: map[string]string{ - "a": "b", - }, - }, - }, - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - request := new(backup.Request) - request.Backup = new(velerov1api.Backup) - if tc.pvMap != nil { - request.PVMap = tc.pvMap - } - operationList := request.GetItemOperationsList() - if tc.operation != nil { - *operationList = append(*operationList, tc.operation) - } - logger := logging.DefaultLogger(logrus.DebugLevel, logging.FormatJSON) - - volumeInfos := generateVolumeInfoForCSIVolumeSnapshot(request, []snapshotv1api.VolumeSnapshot{tc.volumeSnapshot}, []snapshotv1api.VolumeSnapshotContent{tc.volumeSnapshotContent}, []snapshotv1api.VolumeSnapshotClass{tc.volumeSnapshotClass}, logger) - require.Equal(t, tc.expectedVolumeInfos, volumeInfos) - }) - } - -} - -func TestGenerateVolumeInfoForVeleroNativeSnapshot(t *testing.T) { - tests := []struct { - name string - nativeSnapshot volume.Snapshot - pvMap map[string]backup.PvcPvInfo - expectedVolumeInfos []volume.VolumeInfo - }{ - { - name: "Native snapshot's IPOS pointer is nil", - nativeSnapshot: volume.Snapshot{ - Spec: volume.SnapshotSpec{ - PersistentVolumeName: "testPV", - VolumeIOPS: nil, - }, - }, - expectedVolumeInfos: []volume.VolumeInfo{}, - }, - { - name: "Cannot find info for the PV", - nativeSnapshot: volume.Snapshot{ - Spec: volume.SnapshotSpec{ - PersistentVolumeName: "testPV", - VolumeIOPS: int64Ptr(100), - }, - }, - expectedVolumeInfos: []volume.VolumeInfo{}, - }, - { - name: "Normal native snapshot", - pvMap: map[string]backup.PvcPvInfo{ - "testPV": { - PVCName: "testPVC", - PVCNamespace: "velero", - PV: corev1api.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testPV", - Labels: map[string]string{"a": "b"}, - }, - Spec: corev1api.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete, - }, - }, - }, - }, - nativeSnapshot: volume.Snapshot{ - Spec: volume.SnapshotSpec{ - PersistentVolumeName: "testPV", - VolumeIOPS: int64Ptr(100), - VolumeType: "ssd", - VolumeAZ: "us-central1-a", - }, - Status: volume.SnapshotStatus{ - ProviderSnapshotID: "pvc-b31e3386-4bbb-4937-95d-7934cd62-b0a1-494b-95d7-0687440e8d0c", - }, - }, - expectedVolumeInfos: []volume.VolumeInfo{ - { - PVCName: "testPVC", - PVCNamespace: "velero", - PVName: "testPV", - BackupMethod: volume.NativeSnapshot, - PVInfo: volume.PVInfo{ - ReclaimPolicy: "Delete", - Labels: map[string]string{ - "a": "b", - }, - }, - NativeSnapshotInfo: volume.NativeSnapshotInfo{ - SnapshotHandle: "pvc-b31e3386-4bbb-4937-95d-7934cd62-b0a1-494b-95d7-0687440e8d0c", - VolumeType: "ssd", - VolumeAZ: "us-central1-a", - IOPS: "100", - }, - }, - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - request := new(backup.Request) - request.VolumeSnapshots = append(request.VolumeSnapshots, &tc.nativeSnapshot) - if tc.pvMap != nil { - request.PVMap = 
tc.pvMap - } - logger := logging.DefaultLogger(logrus.DebugLevel, logging.FormatJSON) - - volumeInfos := generateVolumeInfoForVeleroNativeSnapshot(request, logger) - require.Equal(t, tc.expectedVolumeInfos, volumeInfos) - }) - } -} - -func TestGenerateVolumeInfoFromPVB(t *testing.T) { - tests := []struct { - name string - pvb *velerov1api.PodVolumeBackup - pod *corev1api.Pod - pvMap map[string]backup.PvcPvInfo - expectedVolumeInfos []volume.VolumeInfo - }{ - { - name: "cannot find PVB's pod, should fail", - pvb: builder.ForPodVolumeBackup("velero", "testPVB").PodName("testPod").PodNamespace("velero").Result(), - expectedVolumeInfos: []volume.VolumeInfo{}, - }, - { - name: "PVB doesn't have a related PVC", - pvb: builder.ForPodVolumeBackup("velero", "testPVB").PodName("testPod").PodNamespace("velero").Result(), - pod: builder.ForPod("velero", "testPod").Containers(&corev1api.Container{ - Name: "test", - VolumeMounts: []corev1api.VolumeMount{ - { - Name: "testVolume", - MountPath: "/data", - }, - }, - }).Volumes( - &corev1api.Volume{ - Name: "", - VolumeSource: corev1api.VolumeSource{ - HostPath: &corev1api.HostPathVolumeSource{}, - }, - }, - ).Result(), - expectedVolumeInfos: []volume.VolumeInfo{ - { - PVCName: "", - PVCNamespace: "", - PVName: "", - BackupMethod: volume.PodVolumeBackup, - PVBInfo: volume.PodVolumeBackupInfo{ - PodName: "testPod", - PodNamespace: "velero", - }, - }, - }, - }, - { - name: "Backup doesn't have information for PVC", - pvb: builder.ForPodVolumeBackup("velero", "testPVB").PodName("testPod").PodNamespace("velero").Result(), - pod: builder.ForPod("velero", "testPod").Containers(&corev1api.Container{ - Name: "test", - VolumeMounts: []corev1api.VolumeMount{ - { - Name: "testVolume", - MountPath: "/data", - }, - }, - }).Volumes( - &corev1api.Volume{ - Name: "", - VolumeSource: corev1api.VolumeSource{ - PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{ - ClaimName: "testPVC", - }, - }, - }, - ).Result(), - expectedVolumeInfos: []volume.VolumeInfo{}, - }, - { - name: "PVB's volume has a PVC", - pvMap: map[string]backup.PvcPvInfo{ - "velero/testPVC": { - PVCName: "testPVC", - PVCNamespace: "velero", - PV: corev1api.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testPV", - Labels: map[string]string{"a": "b"}, - }, - Spec: corev1api.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete, - }, - }, - }, - }, - pvb: builder.ForPodVolumeBackup("velero", "testPVB").PodName("testPod").PodNamespace("velero").Result(), - pod: builder.ForPod("velero", "testPod").Containers(&corev1api.Container{ - Name: "test", - VolumeMounts: []corev1api.VolumeMount{ - { - Name: "testVolume", - MountPath: "/data", - }, - }, - }).Volumes( - &corev1api.Volume{ - Name: "", - VolumeSource: corev1api.VolumeSource{ - PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{ - ClaimName: "testPVC", - }, - }, - }, - ).Result(), - expectedVolumeInfos: []volume.VolumeInfo{ - { - PVCName: "testPVC", - PVCNamespace: "velero", - PVName: "testPV", - BackupMethod: volume.PodVolumeBackup, - PVBInfo: volume.PodVolumeBackupInfo{ - PodName: "testPod", - PodNamespace: "velero", - }, - PVInfo: volume.PVInfo{ - ReclaimPolicy: string(corev1api.PersistentVolumeReclaimDelete), - Labels: map[string]string{"a": "b"}, - }, - }, - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - crClient := velerotest.NewFakeControllerRuntimeClient(t) - logger := logging.DefaultLogger(logrus.DebugLevel, logging.FormatJSON) - 
request := new(pkgbackup.Request) - request.PodVolumeBackups = append(request.PodVolumeBackups, tc.pvb) - if tc.pvMap != nil { - request.PVMap = tc.pvMap - } - if tc.pod != nil { - require.NoError(t, crClient.Create(context.TODO(), tc.pod)) - } - - volumeInfos := generateVolumeInfoFromPVB(request, crClient, logger) - require.Equal(t, tc.expectedVolumeInfos, volumeInfos) - }) - } -} - -func TestGenerateVolumeInfoFromDataUpload(t *testing.T) { - now := metav1.Now() - tests := []struct { - name string - volumeSnapshotClass *snapshotv1api.VolumeSnapshotClass - dataUpload *velerov2alpha1.DataUpload - operation *itemoperation.BackupOperation - pvMap map[string]backup.PvcPvInfo - expectedVolumeInfos []volume.VolumeInfo - }{ - { - name: "Operation is not for PVC", - operation: &itemoperation.BackupOperation{ - Spec: itemoperation.BackupOperationSpec{ - ResourceIdentifier: velero.ResourceIdentifier{ - GroupResource: schema.GroupResource{ - Group: "", - Resource: "configmaps", - }, - }, - }, - }, - expectedVolumeInfos: []volume.VolumeInfo{}, - }, - { - name: "Operation doesn't have DataUpload PostItemOperation", - operation: &itemoperation.BackupOperation{ - Spec: itemoperation.BackupOperationSpec{ - ResourceIdentifier: velero.ResourceIdentifier{ - GroupResource: schema.GroupResource{ - Group: "", - Resource: "persistentvolumeclaims", - }, - Namespace: "velero", - Name: "testPVC", - }, - PostOperationItems: []velero.ResourceIdentifier{ - { - GroupResource: schema.GroupResource{ - Group: "", - Resource: "configmaps", - }, - }, - }, - }, - }, - expectedVolumeInfos: []volume.VolumeInfo{}, - }, - { - name: "DataUpload cannot be found for operation", - operation: &itemoperation.BackupOperation{ - Spec: itemoperation.BackupOperationSpec{ - OperationID: "testOperation", - ResourceIdentifier: velero.ResourceIdentifier{ - GroupResource: schema.GroupResource{ - Group: "", - Resource: "persistentvolumeclaims", - }, - Namespace: "velero", - Name: "testPVC", - }, - PostOperationItems: []velero.ResourceIdentifier{ - { - GroupResource: schema.GroupResource{ - Group: "velero.io", - Resource: "datauploads", - }, - Namespace: "velero", - Name: "testDU", - }, - }, - }, - }, - expectedVolumeInfos: []volume.VolumeInfo{}, - }, - { - name: "VolumeSnapshotClass cannot be found for operation", - dataUpload: builder.ForDataUpload("velero", "testDU").DataMover("velero").CSISnapshot(&velerov2alpha1.CSISnapshotSpec{ - VolumeSnapshot: "testVS", - }).SnapshotID("testSnapshotHandle").Result(), - operation: &itemoperation.BackupOperation{ - Spec: itemoperation.BackupOperationSpec{ - OperationID: "testOperation", - ResourceIdentifier: velero.ResourceIdentifier{ - GroupResource: schema.GroupResource{ - Group: "", - Resource: "persistentvolumeclaims", - }, - Namespace: "velero", - Name: "testPVC", - }, - PostOperationItems: []velero.ResourceIdentifier{ - { - GroupResource: schema.GroupResource{ - Group: "velero.io", - Resource: "datauploads", - }, - Namespace: "velero", - Name: "testDU", - }, - }, - }, - }, - pvMap: map[string]backup.PvcPvInfo{ - "velero/testPVC": { - PVCName: "testPVC", - PVCNamespace: "velero", - PV: corev1api.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testPV", - Labels: map[string]string{"a": "b"}, - }, - Spec: corev1api.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete, - }, - }, - }, - }, - expectedVolumeInfos: []volume.VolumeInfo{ - { - PVCName: "testPVC", - PVCNamespace: "velero", - PVName: "testPV", - BackupMethod: volume.CSISnapshot, - 
SnapshotDataMoved: true, - OperationID: "testOperation", - SnapshotDataMovementInfo: volume.SnapshotDataMovementInfo{ - DataMover: "velero", - UploaderType: "kopia", - }, - PVInfo: volume.PVInfo{ - ReclaimPolicy: string(corev1api.PersistentVolumeReclaimDelete), - Labels: map[string]string{"a": "b"}, - }, - }, - }, - }, - { - name: "Normal DataUpload case", - dataUpload: builder.ForDataUpload("velero", "testDU").DataMover("velero").CSISnapshot(&velerov2alpha1.CSISnapshotSpec{ - VolumeSnapshot: "testVS", - SnapshotClass: "testClass", - }).SnapshotID("testSnapshotHandle").Result(), - volumeSnapshotClass: builder.ForVolumeSnapshotClass("testClass").Driver("pd.csi.storage.gke.io").Result(), - operation: &itemoperation.BackupOperation{ - Spec: itemoperation.BackupOperationSpec{ - OperationID: "testOperation", - ResourceIdentifier: velero.ResourceIdentifier{ - GroupResource: schema.GroupResource{ - Group: "", - Resource: "persistentvolumeclaims", - }, - Namespace: "velero", - Name: "testPVC", - }, - PostOperationItems: []velero.ResourceIdentifier{ - { - GroupResource: schema.GroupResource{ - Group: "velero.io", - Resource: "datauploads", - }, - Namespace: "velero", - Name: "testDU", - }, - }, - }, - Status: itemoperation.OperationStatus{ - Created: &now, - }, - }, - pvMap: map[string]backup.PvcPvInfo{ - "velero/testPVC": { - PVCName: "testPVC", - PVCNamespace: "velero", - PV: corev1api.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testPV", - Labels: map[string]string{"a": "b"}, - }, - Spec: corev1api.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: corev1api.PersistentVolumeReclaimDelete, - }, - }, - }, - }, - expectedVolumeInfos: []volume.VolumeInfo{ - { - PVCName: "testPVC", - PVCNamespace: "velero", - PVName: "testPV", - BackupMethod: volume.CSISnapshot, - SnapshotDataMoved: true, - OperationID: "testOperation", - StartTimestamp: &now, - CSISnapshotInfo: volume.CSISnapshotInfo{ - Driver: "pd.csi.storage.gke.io", - }, - SnapshotDataMovementInfo: volume.SnapshotDataMovementInfo{ - DataMover: "velero", - UploaderType: "kopia", - }, - PVInfo: volume.PVInfo{ - ReclaimPolicy: string(corev1api.PersistentVolumeReclaimDelete), - Labels: map[string]string{"a": "b"}, - }, - }, - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - request := new(backup.Request) - operationList := request.GetItemOperationsList() - if tc.operation != nil { - *operationList = append(*operationList, tc.operation) - } - if tc.pvMap != nil { - request.PVMap = tc.pvMap - } - logger := logging.DefaultLogger(logrus.DebugLevel, logging.FormatJSON) - - crClient := velerotest.NewFakeControllerRuntimeClient(t) - if tc.dataUpload != nil { - crClient.Create(context.TODO(), tc.dataUpload) - } - - if tc.volumeSnapshotClass != nil { - crClient.Create(context.TODO(), tc.volumeSnapshotClass) - } - - volumeInfos := generateVolumeInfoFromDataUpload(request, crClient, logger) - require.Equal(t, tc.expectedVolumeInfos, volumeInfos) - }) - } -} - -func int64Ptr(val int) *int64 { - i := int64(val) - return &i -} - -func stringPtr(str string) *string { - return &str -} diff --git a/pkg/controller/backup_deletion_controller.go b/pkg/controller/backup_deletion_controller.go index 1b78734e2..e8fb6d27d 100644 --- a/pkg/controller/backup_deletion_controller.go +++ b/pkg/controller/backup_deletion_controller.go @@ -22,40 +22,35 @@ import ( "fmt" "time" - corev1 "k8s.io/api/core/v1" - - velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" - 
"github.com/vmware-tanzu/velero/pkg/util/boolptr" - jsonpatch "github.com/evanphx/json-patch" "github.com/pkg/errors" "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" kubeerrs "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/utils/clock" - ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/vmware-tanzu/velero/internal/credentials" "github.com/vmware-tanzu/velero/internal/delete" + internalVolume "github.com/vmware-tanzu/velero/internal/volume" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + velerov2alpha1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" "github.com/vmware-tanzu/velero/pkg/discovery" "github.com/vmware-tanzu/velero/pkg/label" "github.com/vmware-tanzu/velero/pkg/metrics" "github.com/vmware-tanzu/velero/pkg/persistence" "github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt" vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" + "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/repository" + "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/filesystem" "github.com/vmware-tanzu/velero/pkg/util/kube" - "github.com/vmware-tanzu/velero/pkg/volume" - - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/vmware-tanzu/velero/pkg/podvolume" ) const ( @@ -462,7 +457,7 @@ func (r *backupDeletionReconciler) volumeSnapshottersForVSL( } // add credential to config - err := volume.UpdateVolumeSnapshotLocationWithCredentialConfig(vsl, r.credentialStore) + err := internalVolume.UpdateVolumeSnapshotLocationWithCredentialConfig(vsl, r.credentialStore) if err != nil { return nil, errors.WithStack(err) } diff --git a/pkg/controller/data_download_controller.go b/pkg/controller/data_download_controller.go index e09bac1f5..2810c1ca6 100644 --- a/pkg/controller/data_download_controller.go +++ b/pkg/controller/data_download_controller.go @@ -486,10 +486,6 @@ func (r *DataDownloadReconciler) SetupWithManager(mgr ctrl.Manager) error { return false } - if newObj.Status.Phase != v1.PodRunning { - return false - } - if newObj.Spec.NodeName == "" { return false } @@ -511,43 +507,55 @@ func (r *DataDownloadReconciler) SetupWithManager(mgr ctrl.Manager) error { func (r *DataDownloadReconciler) findSnapshotRestoreForPod(podObj client.Object) []reconcile.Request { pod := podObj.(*v1.Pod) - dd, err := findDataDownloadByPod(r.client, *pod) + + log := r.logger.WithField("pod", pod.Name) if err != nil { - r.logger.WithField("Restore pod", pod.Name).WithError(err).Error("unable to get DataDownload") + log.WithError(err).Error("unable to get DataDownload") return []reconcile.Request{} } else if dd == nil { - r.logger.WithField("Restore pod", pod.Name).Error("get empty DataDownload") + log.Error("get empty DataDownload") return []reconcile.Request{} } + log = log.WithFields(logrus.Fields{ + "Dataddownload": dd.Name, + }) if dd.Status.Phase != velerov2alpha1api.DataDownloadPhaseAccepted { return []reconcile.Request{} } - requests := make([]reconcile.Request, 1) + if pod.Status.Phase == v1.PodRunning { + log.Info("Preparing data download") + // we don't expect anyone else update the CR during the Prepare process + updated, err := r.exclusiveUpdateDataDownload(context.Background(), dd, r.prepareDataDownload) + if err != nil || !updated { + log.WithField("updated", 
updated).WithError(err).Warn("failed to update datadownload, prepare will halt for this datadownload") + return []reconcile.Request{} + } + } else if unrecoverable, reason := kube.IsPodUnrecoverable(pod, log); unrecoverable { + err := UpdateDataDownloadWithRetry(context.Background(), r.client, types.NamespacedName{Namespace: dd.Namespace, Name: dd.Name}, r.logger.WithField("datadownlad", dd.Name), + func(dataDownload *velerov2alpha1api.DataDownload) { + dataDownload.Spec.Cancel = true + dataDownload.Status.Message = fmt.Sprintf("datadownload mark as cancel to failed early for exposing pod %s/%s is in abnormal status for %s", pod.Namespace, pod.Name, reason) + }) - r.logger.WithField("Restore pod", pod.Name).Infof("Preparing data download %s", dd.Name) - - // we don't expect anyone else update the CR during the Prepare process - updated, err := r.exclusiveUpdateDataDownload(context.Background(), dd, r.prepareDataDownload) - if err != nil || !updated { - r.logger.WithFields(logrus.Fields{ - "Datadownload": dd.Name, - "Restore pod": pod.Name, - "updated": updated, - }).WithError(err).Warn("failed to patch datadownload, prepare will halt for this datadownload") + if err != nil { + log.WithError(err).Warn("failed to cancel datadownload, and it will wait for prepare timeout") + return []reconcile.Request{} + } + log.Info("Exposed pod is in abnormal status, and datadownload is marked as cancel") + } else { return []reconcile.Request{} } - requests[0] = reconcile.Request{ + request := reconcile.Request{ NamespacedName: types.NamespacedName{ Namespace: dd.Namespace, Name: dd.Name, }, } - - return requests + return []reconcile.Request{request} } func (r *DataDownloadReconciler) FindDataDownloads(ctx context.Context, cli client.Client, ns string) ([]*velerov2alpha1api.DataDownload, error) { diff --git a/pkg/controller/data_download_controller_test.go b/pkg/controller/data_download_controller_test.go index afdadf61d..a3c44a7e3 100644 --- a/pkg/controller/data_download_controller_test.go +++ b/pkg/controller/data_download_controller_test.go @@ -632,7 +632,7 @@ func TestFindDataDownloadForPod(t *testing.T) { { name: "find dataDownload for pod", du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Result(), - pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Labels(map[string]string{velerov1api.DataDownloadLabel: dataDownloadName}).Result(), + pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Labels(map[string]string{velerov1api.DataDownloadLabel: dataDownloadName}).Status(corev1.PodStatus{Phase: corev1.PodRunning}).Result(), checkFunc: func(du *velerov2alpha1api.DataDownload, requests []reconcile.Request) { // Assert that the function returns a single request assert.Len(t, requests, 1) diff --git a/pkg/controller/data_upload_controller.go b/pkg/controller/data_upload_controller.go index bbdf13943..ef28378e5 100644 --- a/pkg/controller/data_upload_controller.go +++ b/pkg/controller/data_upload_controller.go @@ -521,10 +521,6 @@ func (r *DataUploadReconciler) SetupWithManager(mgr ctrl.Manager) error { return false } - if newObj.Status.Phase != corev1.PodRunning { - return false - } - if newObj.Spec.NodeName != r.nodeName { return false } @@ -547,37 +543,56 @@ func (r *DataUploadReconciler) SetupWithManager(mgr ctrl.Manager) error { func (r *DataUploadReconciler) findDataUploadForPod(podObj client.Object) []reconcile.Request { pod := podObj.(*corev1.Pod) du, err := findDataUploadByPod(r.client, *pod) + log := r.logger.WithFields(logrus.Fields{ + 
"Backup pod": pod.Name, + }) + if err != nil { - r.logger.WithField("Backup pod", pod.Name).WithError(err).Error("unable to get dataupload") + log.WithError(err).Error("unable to get dataupload") return []reconcile.Request{} } else if du == nil { - r.logger.WithField("Backup pod", pod.Name).Error("get empty DataUpload") + log.Error("get empty DataUpload") return []reconcile.Request{} } + log = log.WithFields(logrus.Fields{ + "Datadupload": du.Name, + }) if du.Status.Phase != velerov2alpha1api.DataUploadPhaseAccepted { return []reconcile.Request{} } - r.logger.WithField("Backup pod", pod.Name).Infof("Preparing dataupload %s", du.Name) - // we don't expect anyone else update the CR during the Prepare process - updated, err := r.exclusiveUpdateDataUpload(context.Background(), du, r.prepareDataUpload) - if err != nil || !updated { - r.logger.WithFields(logrus.Fields{ - "Dataupload": du.Name, - "Backup pod": pod.Name, - "updated": updated, - }).WithError(err).Warn("failed to patch dataupload, prepare will halt for this dataupload") + if pod.Status.Phase == corev1.PodRunning { + log.Info("Preparing dataupload") + // we don't expect anyone else update the CR during the Prepare process + updated, err := r.exclusiveUpdateDataUpload(context.Background(), du, r.prepareDataUpload) + if err != nil || !updated { + log.WithField("updated", updated).WithError(err).Warn("failed to update dataupload, prepare will halt for this dataupload") + return []reconcile.Request{} + } + } else if unrecoverable, reason := kube.IsPodUnrecoverable(pod, log); unrecoverable { // let the abnormal backup pod failed early + err := UpdateDataUploadWithRetry(context.Background(), r.client, types.NamespacedName{Namespace: du.Namespace, Name: du.Name}, r.logger.WithField("dataupload", du.Name), + func(dataUpload *velerov2alpha1api.DataUpload) { + dataUpload.Spec.Cancel = true + dataUpload.Status.Message = fmt.Sprintf("dataupload mark as cancel to failed early for exposing pod %s/%s is in abnormal status for reason %s", pod.Namespace, pod.Name, reason) + }) + + if err != nil { + log.WithError(err).Warn("failed to cancel dataupload, and it will wait for prepare timeout") + return []reconcile.Request{} + } + log.Info("Exposed pod is in abnormal status and dataupload is marked as cancel") + } else { return []reconcile.Request{} } - requests := reconcile.Request{ + request := reconcile.Request{ NamespacedName: types.NamespacedName{ Namespace: du.Namespace, Name: du.Name, }, } - return []reconcile.Request{requests} + return []reconcile.Request{request} } func (r *DataUploadReconciler) FindDataUploadsByPod(ctx context.Context, cli client.Client, ns string) ([]velerov2alpha1api.DataUpload, error) { diff --git a/pkg/controller/data_upload_controller_test.go b/pkg/controller/data_upload_controller_test.go index cdbadf9d4..25ea662bc 100644 --- a/pkg/controller/data_upload_controller_test.go +++ b/pkg/controller/data_upload_controller_test.go @@ -683,7 +683,7 @@ func TestFindDataUploadForPod(t *testing.T) { { name: "find dataUpload for pod", du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Result(), - pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Labels(map[string]string{velerov1api.DataUploadLabel: dataUploadName}).Result(), + pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Labels(map[string]string{velerov1api.DataUploadLabel: dataUploadName}).Status(corev1.PodStatus{Phase: corev1.PodRunning}).Result(), checkFunc: func(du *velerov2alpha1api.DataUpload, requests 
diff --git a/pkg/controller/restore_controller.go b/pkg/controller/restore_controller.go
index f6b9b39d9..bacd351eb 100644
--- a/pkg/controller/restore_controller.go
+++ b/pkg/controller/restore_controller.go
@@ -44,6 +44,7 @@ import (
 	"github.com/vmware-tanzu/velero/internal/hook"
 	"github.com/vmware-tanzu/velero/internal/resourcemodifiers"
+	internalVolume "github.com/vmware-tanzu/velero/internal/volume"
 	api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/itemoperation"
 	"github.com/vmware-tanzu/velero/pkg/label"
@@ -520,6 +521,16 @@ func (r *restoreReconciler) runValidatedRestore(restore *api.Restore, info backu
 		return errors.Wrap(err, "fail to fetch CSI VolumeSnapshots metadata")
 	}

+	backupVolumeInfoMap := make(map[string]internalVolume.VolumeInfo)
+	volumeInfos, err := backupStore.GetBackupVolumeInfos(restore.Spec.BackupName)
+	if err != nil {
+		restoreLog.WithError(err).Errorf("fail to get VolumeInfos metadata file for backup %s", restore.Spec.BackupName)
+		return errors.WithStack(err)
+	}
+	for _, volumeInfo := range volumeInfos {
+		backupVolumeInfoMap[volumeInfo.PVName] = *volumeInfo
+	}
+
 	restoreLog.Info("starting restore")

 	var podVolumeBackups []*api.PodVolumeBackup
@@ -537,6 +549,7 @@ func (r *restoreReconciler) runValidatedRestore(restore *api.Restore, info backu
 		ResourceModifiers:    resourceModifiers,
 		DisableInformerCache: r.disableInformerCache,
 		CSIVolumeSnapshots:   csiVolumeSnapshots,
+		VolumeInfoMap:        backupVolumeInfoMap,
 	}
 	restoreWarnings, restoreErrors := r.restorer.RestoreWithResolvers(restoreReq, actionsResolver, pluginManager)
diff --git a/pkg/controller/restore_controller_test.go b/pkg/controller/restore_controller_test.go
index 9437f1d1c..5029c1d6e 100644
--- a/pkg/controller/restore_controller_test.go
+++ b/pkg/controller/restore_controller_test.go
@@ -29,15 +29,15 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	clocktesting "k8s.io/utils/clock/testing"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"

-	corev1 "k8s.io/api/core/v1"
-
 	"github.com/vmware-tanzu/velero/internal/resourcemodifiers"
+	internalVolume "github.com/vmware-tanzu/velero/internal/volume"
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/builder"
 	"github.com/vmware-tanzu/velero/pkg/metrics"
@@ -235,6 +235,7 @@ func TestRestoreReconcile(t *testing.T) {
 		putRestoreLogErr      error
 		expectedFinalPhase    string
 		addValidFinalizer     bool
+		emptyVolumeInfo       bool
 	}{
 		{
 			name: "restore with both namespace in both includedNamespaces and excludedNamespaces fails validation",
@@ -415,6 +416,18 @@ func TestRestoreReconcile(t *testing.T) {
 			backup:      defaultBackup().StorageLocation("default").Result(),
 			expectedErr: false,
 		},
+		{
+			name:                  "valid restore with empty VolumeInfos",
+			location:              defaultStorageLocation,
+			restore:               NewRestore("foo", "bar", "backup-1", "ns-1", "", velerov1api.RestorePhaseNew).Result(),
+			backup:                defaultBackup().StorageLocation("default").Result(),
+			emptyVolumeInfo:       true,
+			expectedErr:           false,
+			expectedPhase:         string(velerov1api.RestorePhaseInProgress),
+			expectedStartTime:     &timestamp,
+			expectedCompletedTime: &timestamp,
+			expectedRestorerCall:  NewRestore("foo", "bar", "backup-1", "ns-1", "", velerov1api.RestorePhaseInProgress).Result(),
+		},
 	}

 	formatFlag := logging.FormatText
@@ -482,6 +495,11 @@ func TestRestoreReconcile(t *testing.T) {
 			backupStore.On("PutRestoreResults", test.backup.Name, test.restore.Name, mock.Anything).Return(nil)
 			backupStore.On("PutRestoredResourceList", test.restore.Name, mock.Anything).Return(nil)
 			backupStore.On("PutRestoreItemOperations", mock.Anything, mock.Anything).Return(nil)
+			if test.emptyVolumeInfo {
+				backupStore.On("GetBackupVolumeInfos", test.backup.Name).Return(nil, nil)
+			} else {
+				backupStore.On("GetBackupVolumeInfos", test.backup.Name).Return([]*internalVolume.VolumeInfo{}, nil)
+			}

 			volumeSnapshots := []*volume.Snapshot{
 				{
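runValidatedRestore now keys each VolumeInfo by PV name and hands the map to the restorer. As a quick orientation to how restore code can branch on that map, a self-contained sketch follows; describeBackupMethod is an invented name, while the BackupMethod constants come from internal/volume:

```
// Hedged sketch: branching on the VolumeInfo map built in runValidatedRestore.
// describeBackupMethod is illustrative only.
package main

import (
	"fmt"

	internalVolume "github.com/vmware-tanzu/velero/internal/volume"
)

func describeBackupMethod(volumeInfoMap map[string]internalVolume.VolumeInfo, pvName string) string {
	info, ok := volumeInfoMap[pvName]
	if !ok {
		// No VolumeInfo recorded: restore falls back to the legacy detection logic.
		return "no VolumeInfo; use legacy detection"
	}
	switch info.BackupMethod {
	case internalVolume.NativeSnapshot:
		return "restore from Velero native snapshot"
	case internalVolume.PodVolumeBackup:
		return "re-provision dynamically; data comes from pod volume backup"
	case internalVolume.CSISnapshot:
		return "re-provision dynamically; data comes from CSI snapshot or DataUpload"
	default:
		// BackupMethod unset means the PV data was skipped during backup.
		return "volume data skipped during backup"
	}
}

func main() {
	m := map[string]internalVolume.VolumeInfo{
		"pv-1": {PVName: "pv-1", BackupMethod: internalVolume.CSISnapshot},
	}
	fmt.Println(describeBackupMethod(m, "pv-1"))
	fmt.Println(describeBackupMethod(m, "pv-2"))
}
```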
diff --git a/pkg/persistence/mocks/backup_store.go b/pkg/persistence/mocks/backup_store.go
index 1bcdf865c..dfa93408b 100644
--- a/pkg/persistence/mocks/backup_store.go
+++ b/pkg/persistence/mocks/backup_store.go
@@ -21,15 +21,14 @@ import (
 	io "io"

 	mock "github.com/stretchr/testify/mock"
+	volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
+
+	internalVolume "github.com/vmware-tanzu/velero/internal/volume"
 	itemoperation "github.com/vmware-tanzu/velero/pkg/itemoperation"
-
-	persistence "github.com/vmware-tanzu/velero/pkg/persistence"
-
+	"github.com/vmware-tanzu/velero/pkg/persistence"
 	v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
-	volume "github.com/vmware-tanzu/velero/pkg/volume"
-
-	volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1"
 )

 // BackupStore is an autogenerated mock type for the BackupStore type
@@ -314,6 +313,29 @@ func (_m *BackupStore) GetRestoreItemOperations(name string) ([]*itemoperation.R
 	return r0, r1
 }

+// GetBackupVolumeInfos provides a mock function with given fields: name
+func (_m *BackupStore) GetBackupVolumeInfos(name string) ([]*internalVolume.VolumeInfo, error) {
+	ret := _m.Called(name)
+
+	var r0 []*internalVolume.VolumeInfo
+	if rf, ok := ret.Get(0).(func(string) []*internalVolume.VolumeInfo); ok {
+		r0 = rf(name)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).([]*internalVolume.VolumeInfo)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(string) error); ok {
+		r1 = rf(name)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
 // IsValid provides a mock function with given fields:
 func (_m *BackupStore) IsValid() error {
 	ret := _m.Called()
diff --git a/pkg/persistence/object_store.go b/pkg/persistence/object_store.go
index 48a48dbf1..539daafce 100644
--- a/pkg/persistence/object_store.go
+++ b/pkg/persistence/object_store.go
@@ -31,6 +31,7 @@ import (
 	kerrors "k8s.io/apimachinery/pkg/util/errors"

 	"github.com/vmware-tanzu/velero/internal/credentials"
+	internalVolume "github.com/vmware-tanzu/velero/internal/volume"
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/itemoperation"
 	"github.com/vmware-tanzu/velero/pkg/plugin/velero"
@@ -73,6 +74,7 @@ type BackupStore interface {
 	GetCSIVolumeSnapshots(name string) ([]*snapshotv1api.VolumeSnapshot, error)
 	GetCSIVolumeSnapshotContents(name string) ([]*snapshotv1api.VolumeSnapshotContent, error)
 	GetCSIVolumeSnapshotClasses(name string) ([]*snapshotv1api.VolumeSnapshotClass, error)
+	GetBackupVolumeInfos(name string) ([]*internalVolume.VolumeInfo, error)

 	// BackupExists checks if the backup metadata file exists in object storage.
BackupExists(bucket, backupName string) (bool, error) @@ -493,8 +495,8 @@ func (s *objectBackupStore) GetPodVolumeBackups(name string) ([]*velerov1api.Pod return podVolumeBackups, nil } -func (s *objectBackupStore) GetBackupVolumeInfos(name string) (*volume.VolumeInfos, error) { - var volumeInfos *volume.VolumeInfos +func (s *objectBackupStore) GetBackupVolumeInfos(name string) ([]*internalVolume.VolumeInfo, error) { + volumeInfos := make([]*internalVolume.VolumeInfo, 0) res, err := tryGet(s.objectStore, s.bucket, s.layout.getBackupVolumeInfoKey(name)) if err != nil { diff --git a/pkg/persistence/object_store_test.go b/pkg/persistence/object_store_test.go index ba6a7bcb8..81ca9b050 100644 --- a/pkg/persistence/object_store_test.go +++ b/pkg/persistence/object_store_test.go @@ -34,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "github.com/vmware-tanzu/velero/internal/credentials" + internalVolume "github.com/vmware-tanzu/velero/internal/volume" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/itemoperation" @@ -1067,27 +1068,25 @@ func TestNewObjectBackupStoreGetterConfig(t *testing.T) { func TestGetBackupVolumeInfos(t *testing.T) { tests := []struct { name string - volumeInfo *volume.VolumeInfos + volumeInfo []*internalVolume.VolumeInfo volumeInfoStr string expectedErr string - expectedResult []volume.VolumeInfo + expectedResult []*internalVolume.VolumeInfo }{ { name: "No VolumeInfos, expect no error.", }, { name: "Valid VolumeInfo, should pass.", - volumeInfo: &volume.VolumeInfos{ - VolumeInfos: []volume.VolumeInfo{ - { - PVCName: "pvcName", - PVName: "pvName", - Skipped: true, - SnapshotDataMoved: false, - }, + volumeInfo: []*internalVolume.VolumeInfo{ + { + PVCName: "pvcName", + PVName: "pvName", + Skipped: true, + SnapshotDataMoved: false, }, }, - expectedResult: []volume.VolumeInfo{ + expectedResult: []*internalVolume.VolumeInfo{ { PVCName: "pvcName", PVName: "pvName", @@ -1098,8 +1097,8 @@ func TestGetBackupVolumeInfos(t *testing.T) { }, { name: "Invalid VolumeInfo string, should also pass.", - volumeInfoStr: `{"volumeInfos": [{"abc": "123", "def": "456", "pvcName": "pvcName"}]}`, - expectedResult: []volume.VolumeInfo{ + volumeInfoStr: `[{"abc": "123", "def": "456", "pvcName": "pvcName"}]`, + expectedResult: []*internalVolume.VolumeInfo{ { PVCName: "pvcName", }, @@ -1141,7 +1140,7 @@ func TestGetBackupVolumeInfos(t *testing.T) { } if len(tc.expectedResult) > 0 { - require.Equal(t, tc.expectedResult, result.VolumeInfos) + require.Equal(t, tc.expectedResult, result) } }) diff --git a/pkg/podvolume/backupper.go b/pkg/podvolume/backupper.go index 3239f10f2..f1575f21d 100644 --- a/pkg/podvolume/backupper.go +++ b/pkg/podvolume/backupper.go @@ -170,6 +170,31 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. 
} log.Infof("pod %s/%s has volumes to backup: %v", pod.Namespace, pod.Name, volumesToBackup) + var ( + pvcSummary = NewPVCBackupSummary() + podVolumes = make(map[string]corev1api.Volume) + errs = []error{} + ) + + // put the pod's volumes and the PVC associated in maps for efficient lookup below + for _, podVolume := range pod.Spec.Volumes { + podVolumes[podVolume.Name] = podVolume + if podVolume.PersistentVolumeClaim != nil { + pvc := new(corev1api.PersistentVolumeClaim) + err := b.crClient.Get(context.TODO(), ctrlclient.ObjectKey{Namespace: pod.Namespace, Name: podVolume.PersistentVolumeClaim.ClaimName}, pvc) + if err != nil { + errs = append(errs, errors.Wrap(err, "error getting persistent volume claim for volume")) + continue + } + pvcSummary.pvcMap[podVolume.Name] = pvc + } + } + + if err := kube.IsPodRunning(pod); err != nil { + skipAllPodVolumes(pod, volumesToBackup, err, pvcSummary, log) + return nil, pvcSummary, nil + } + err := nodeagent.IsRunningInNode(b.ctx, backup.Namespace, pod.Spec.NodeName, b.crClient) if err != nil { return nil, nil, []error{err} @@ -198,36 +223,10 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. b.resultsLock.Unlock() var ( - errs []error podVolumeBackups []*velerov1api.PodVolumeBackup - podVolumes = make(map[string]corev1api.Volume) mountedPodVolumes = sets.String{} attachedPodDevices = sets.String{} ) - pvcSummary := NewPVCBackupSummary() - - // put the pod's volumes and the PVC associated in maps for efficient lookup below - for _, podVolume := range pod.Spec.Volumes { - podVolumes[podVolume.Name] = podVolume - if podVolume.PersistentVolumeClaim != nil { - pvc := new(corev1api.PersistentVolumeClaim) - err := b.crClient.Get(context.TODO(), ctrlclient.ObjectKey{Namespace: pod.Namespace, Name: podVolume.PersistentVolumeClaim.ClaimName}, pvc) - if err != nil { - errs = append(errs, errors.Wrap(err, "error getting persistent volume claim for volume")) - continue - } - pvcSummary.pvcMap[podVolume.Name] = pvc - } - } - - if err := kube.IsPodRunning(pod); err != nil { - for _, volumeName := range volumesToBackup { - err := errors.Wrapf(err, "backup for volume %s is skipped", volumeName) - log.WithError(err).Warn("Skip pod volume") - pvcSummary.addSkipped(volumeName, fmt.Sprintf("the pod the PVC is mounted to, %s/%s, is not running", pod.Namespace, pod.Name)) - } - return nil, pvcSummary, nil - } for _, container := range pod.Spec.Containers { for _, volumeMount := range container.VolumeMounts { @@ -333,6 +332,13 @@ ForEachVolume: return podVolumeBackups, pvcSummary, errs } +func skipAllPodVolumes(pod *corev1api.Pod, volumesToBackup []string, err error, pvcSummary *PVCBackupSummary, log logrus.FieldLogger) { + for _, volumeName := range volumesToBackup { + log.WithError(err).Warnf("Skip pod volume %s", volumeName) + pvcSummary.addSkipped(volumeName, fmt.Sprintf("encountered a problem with backing up the PVC of pod %s/%s: %v", pod.Namespace, pod.Name, err)) + } +} + // isHostPathVolume returns true if the volume is either a hostPath pod volume or a persistent // volume claim on a hostPath persistent volume, or false otherwise. 
func isHostPathVolume(volume *corev1api.Volume, pvc *corev1api.PersistentVolumeClaim, crClient ctrlclient.Client) (bool, error) { diff --git a/pkg/restore/pv_restorer.go b/pkg/restore/pv_restorer.go index 03578dcfe..a2565e58f 100644 --- a/pkg/restore/pv_restorer.go +++ b/pkg/restore/pv_restorer.go @@ -26,6 +26,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "github.com/vmware-tanzu/velero/internal/credentials" + internalVolume "github.com/vmware-tanzu/velero/internal/volume" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/volume" @@ -132,7 +133,7 @@ func getSnapshotInfo(pvName string, backup *api.Backup, volumeSnapshots []*volum } // add credential to config - err = volume.UpdateVolumeSnapshotLocationWithCredentialConfig(snapshotLocation, credentialStore) + err = internalVolume.UpdateVolumeSnapshotLocationWithCredentialConfig(snapshotLocation, credentialStore) if err != nil { return nil, errors.WithStack(err) } diff --git a/pkg/restore/request.go b/pkg/restore/request.go index 2a267a5ff..5d7f6f929 100644 --- a/pkg/restore/request.go +++ b/pkg/restore/request.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "github.com/vmware-tanzu/velero/internal/resourcemodifiers" + internalVolume "github.com/vmware-tanzu/velero/internal/volume" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/itemoperation" "github.com/vmware-tanzu/velero/pkg/volume" @@ -62,6 +63,7 @@ type Request struct { ResourceModifiers *resourcemodifiers.ResourceModifiers DisableInformerCache bool CSIVolumeSnapshots []*snapshotv1api.VolumeSnapshot + VolumeInfoMap map[string]internalVolume.VolumeInfo } type restoredItemStatus struct { diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index 57a6156ef..b4c7e9e82 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -53,6 +53,7 @@ import ( "github.com/vmware-tanzu/velero/internal/credentials" "github.com/vmware-tanzu/velero/internal/hook" "github.com/vmware-tanzu/velero/internal/resourcemodifiers" + internalVolume "github.com/vmware-tanzu/velero/internal/volume" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/archive" "github.com/vmware-tanzu/velero/pkg/client" @@ -326,6 +327,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers( disableInformerCache: req.DisableInformerCache, featureVerifier: kr.featureVerifier, hookTracker: hook.NewHookTracker(), + volumeInfoMap: req.VolumeInfoMap, } return restoreCtx.execute() @@ -379,6 +381,7 @@ type restoreContext struct { disableInformerCache bool featureVerifier features.Verifier hookTracker *hook.HookTracker + volumeInfoMap map[string]internalVolume.VolumeInfo } type resourceClientKey struct { @@ -1122,15 +1125,17 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso itemExists := false resourceID := getResourceID(groupResource, namespace, obj.GetName()) + restoreLogger := ctx.log.WithFields(logrus.Fields{ + "namespace": obj.GetNamespace(), + "name": obj.GetName(), + "groupResource": groupResource.String(), + }) + // Check if group/resource should be restored. We need to do this here since // this method may be getting called for an additional item which is a group/resource // that's excluded. 
if !ctx.resourceIncludesExcludes.ShouldInclude(groupResource.String()) { - ctx.log.WithFields(logrus.Fields{ - "namespace": obj.GetNamespace(), - "name": obj.GetName(), - "groupResource": groupResource.String(), - }).Info("Not restoring item because resource is excluded") + restoreLogger.Info("Not restoring item because resource is excluded") return warnings, errs, itemExists } @@ -1142,11 +1147,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso // to check the *original* namespace, not the remapped one if it's been remapped. if namespace != "" { if !ctx.namespaceIncludesExcludes.ShouldInclude(obj.GetNamespace()) && !ctx.resourceMustHave.Has(groupResource.String()) { - ctx.log.WithFields(logrus.Fields{ - "namespace": obj.GetNamespace(), - "name": obj.GetName(), - "groupResource": groupResource.String(), - }).Info("Not restoring item because namespace is excluded") + restoreLogger.Info("Not restoring item because namespace is excluded") return warnings, errs, itemExists } @@ -1170,11 +1171,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso } } else { if boolptr.IsSetToFalse(ctx.restore.Spec.IncludeClusterResources) { - ctx.log.WithFields(logrus.Fields{ - "namespace": obj.GetNamespace(), - "name": obj.GetName(), - "groupResource": groupResource.String(), - }).Info("Not restoring item because it's cluster-scoped") + restoreLogger.Info("Not restoring item because it's cluster-scoped") return warnings, errs, itemExists } } @@ -1238,168 +1235,111 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso } if groupResource == kuberesource.PersistentVolumes { - switch { - case hasSnapshot(name, ctx.volumeSnapshots): - oldName := obj.GetName() - shouldRenamePV, err := shouldRenamePV(ctx, obj, resourceClient) - if err != nil { - errs.Add(namespace, err) - return warnings, errs, itemExists - } + if volumeInfo, ok := ctx.volumeInfoMap[obj.GetName()]; ok { + ctx.log.Infof("Find VolumeInfo for PV %s.", obj.GetName()) - // Check to see if the claimRef.namespace field needs to be remapped, - // and do so if necessary. - _, err = remapClaimRefNS(ctx, obj) - if err != nil { - errs.Add(namespace, err) - return warnings, errs, itemExists - } - - var shouldRestoreSnapshot bool - if !shouldRenamePV { - // Check if the PV exists in the cluster before attempting to create - // a volume from the snapshot, in order to avoid orphaned volumes (GH #609) - shouldRestoreSnapshot, err = ctx.shouldRestore(name, resourceClient) + switch volumeInfo.BackupMethod { + case internalVolume.NativeSnapshot: + obj, err = ctx.handlePVHasNativeSnapshot(obj, resourceClient) if err != nil { - errs.Add(namespace, errors.Wrapf(err, "error waiting on in-cluster persistentvolume %s", name)) + errs.Add(namespace, err) return warnings, errs, itemExists } - } else { - // If we're renaming the PV, we're going to give it a new random name, - // so we can assume it doesn't already exist in the cluster and therefore - // we should proceed with restoring from snapshot. - shouldRestoreSnapshot = true - } - if shouldRestoreSnapshot { - // Reset the PV's binding status so that Kubernetes can properly - // associate it with the restored PVC. - obj = resetVolumeBindingInfo(obj) + name = obj.GetName() - // Even if we're renaming the PV, obj still has the old name here, because the pvRestorer - // uses the original name to look up metadata about the snapshot. 
- ctx.log.Infof("Restoring persistent volume from snapshot.") - updatedObj, err := ctx.pvRestorer.executePVAction(obj) - if err != nil { - errs.Add(namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err)) + case internalVolume.PodVolumeBackup: + restoreLogger.Infof("Dynamically re-provisioning persistent volume because it has a pod volume backup to be restored.") + ctx.pvsToProvision.Insert(name) + + // Return early because we don't want to restore the PV itself, we + // want to dynamically re-provision it. + return warnings, errs, itemExists + + case internalVolume.CSISnapshot: + restoreLogger.Infof("Dynamically re-provisioning persistent volume because it has a CSI VolumeSnapshot or a related snapshot DataUpload.") + ctx.pvsToProvision.Insert(name) + + if ready, err := ctx.featureVerifier.Verify(velerov1api.CSIFeatureFlag); !ready { + ctx.log.Errorf("Failed to verify CSI modules, ready %v, err %v", ready, err) + errs.Add(namespace, fmt.Errorf("CSI modules are not ready for restore. Check CSI feature is enabled and CSI plugin is installed")) + } + + // Return early because we don't want to restore the PV itself, we + // want to dynamically re-provision it. + return warnings, errs, itemExists + + // When the PV data is skipped from backup, it's VolumeInfo BackupMethod + // is not set, and it will fall into the default case. + default: + if hasDeleteReclaimPolicy(obj.Object) { + restoreLogger.Infof("Dynamically re-provisioning persistent volume because it doesn't have a snapshot and its reclaim policy is Delete.") + ctx.pvsToProvision.Insert(name) + + // Return early because we don't want to restore the PV itself, we + // want to dynamically re-provision it. return warnings, errs, itemExists - } - obj = updatedObj - - // VolumeSnapshotter has modified the PV name, we should rename the PV. - if oldName != obj.GetName() { - shouldRenamePV = true - } - } - - if shouldRenamePV { - var pvName string - if oldName == obj.GetName() { - // pvRestorer hasn't modified the PV name, we need to rename the PV. - pvName, err = ctx.pvRenamer(oldName) + } else { + obj, err = ctx.handleSkippedPVHasRetainPolicy(obj, resourceID, restoreLogger) if err != nil { - errs.Add(namespace, errors.Wrapf(err, "error renaming PV")) + errs.Add(namespace, err) return warnings, errs, itemExists } - } else { - // VolumeSnapshotter could have modified the PV name through - // function `SetVolumeID`, - pvName = obj.GetName() + } + } + } else { + // TODO: VolumeInfo is adopted and old logic is deprecated in v1.13. + // Remove the old logic in v1.15. + ctx.log.Infof("Cannot find VolumeInfo for PV %s.", obj.GetName()) + + switch { + case hasSnapshot(name, ctx.volumeSnapshots): + obj, err = ctx.handlePVHasNativeSnapshot(obj, resourceClient) + if err != nil { + errs.Add(namespace, err) + return warnings, errs, itemExists } - ctx.renamedPVs[oldName] = pvName - obj.SetName(pvName) - name = pvName + name = obj.GetName() - // Add the original PV name as an annotation. - annotations := obj.GetAnnotations() - if annotations == nil { - annotations = map[string]string{} + case hasPodVolumeBackup(obj, ctx): + restoreLogger.Infof("Dynamically re-provisioning persistent volume because it has a pod volume backup to be restored.") + ctx.pvsToProvision.Insert(name) + + // Return early because we don't want to restore the PV itself, we + // want to dynamically re-provision it. 
+ return warnings, errs, itemExists + + case hasCSIVolumeSnapshot(ctx, obj): + fallthrough + case hasSnapshotDataUpload(ctx, obj): + restoreLogger.Infof("Dynamically re-provisioning persistent volume because it has a CSI VolumeSnapshot or a related snapshot DataUpload.") + ctx.pvsToProvision.Insert(name) + + if ready, err := ctx.featureVerifier.Verify(velerov1api.CSIFeatureFlag); !ready { + ctx.log.Errorf("Failed to verify CSI modules, ready %v, err %v", ready, err) + errs.Add(namespace, fmt.Errorf("CSI modules are not ready for restore. Check CSI feature is enabled and CSI plugin is installed")) } - annotations["velero.io/original-pv-name"] = oldName - obj.SetAnnotations(annotations) - } - case hasPodVolumeBackup(obj, ctx): - ctx.log.WithFields(logrus.Fields{ - "namespace": obj.GetNamespace(), - "name": obj.GetName(), - "groupResource": groupResource.String(), - }).Infof("Dynamically re-provisioning persistent volume because it has a pod volume backup to be restored.") - ctx.pvsToProvision.Insert(name) - - // Return early because we don't want to restore the PV itself, we - // want to dynamically re-provision it. - return warnings, errs, itemExists - - case hasCSIVolumeSnapshot(ctx, obj): - ctx.log.WithFields(logrus.Fields{ - "namespace": obj.GetNamespace(), - "name": obj.GetName(), - "groupResource": groupResource.String(), - }).Infof("Dynamically re-provisioning persistent volume because it has a related CSI VolumeSnapshot.") - ctx.pvsToProvision.Insert(name) - - if ready, err := ctx.featureVerifier.Verify(velerov1api.CSIFeatureFlag); !ready { - ctx.log.Errorf("Failed to verify CSI modules, ready %v, err %v", ready, err) - errs.Add(namespace, fmt.Errorf("CSI modules are not ready for restore. Check CSI feature is enabled and CSI plugin is installed")) - } - - // Return early because we don't want to restore the PV itself, we - // want to dynamically re-provision it. - return warnings, errs, itemExists - - case hasSnapshotDataUpload(ctx, obj): - ctx.log.WithFields(logrus.Fields{ - "namespace": obj.GetNamespace(), - "name": obj.GetName(), - "groupResource": groupResource.String(), - }).Infof("Dynamically re-provisioning persistent volume because it has a related snapshot DataUpload.") - ctx.pvsToProvision.Insert(name) - - if ready, err := ctx.featureVerifier.Verify(velerov1api.CSIFeatureFlag); !ready { - ctx.log.Errorf("Failed to verify CSI modules, ready %v, err %v", ready, err) - errs.Add(namespace, fmt.Errorf("CSI modules are not ready for restore. Check CSI feature is enabled and CSI plugin is installed")) - } - - // Return early because we don't want to restore the PV itself, we - // want to dynamically re-provision it. - return warnings, errs, itemExists - - case hasDeleteReclaimPolicy(obj.Object): - ctx.log.WithFields(logrus.Fields{ - "namespace": obj.GetNamespace(), - "name": obj.GetName(), - "groupResource": groupResource.String(), - }).Infof("Dynamically re-provisioning persistent volume because it doesn't have a snapshot and its reclaim policy is Delete.") - ctx.pvsToProvision.Insert(name) - - // Return early because we don't want to restore the PV itself, we - // want to dynamically re-provision it. 
- return warnings, errs, itemExists - - default: - ctx.log.WithFields(logrus.Fields{ - "namespace": obj.GetNamespace(), - "name": obj.GetName(), - "groupResource": groupResource.String(), - }).Infof("Restoring persistent volume as-is because it doesn't have a snapshot and its reclaim policy is not Delete.") - - // Check to see if the claimRef.namespace field needs to be remapped, and do so if necessary. - _, err = remapClaimRefNS(ctx, obj) - if err != nil { - errs.Add(namespace, err) + // Return early because we don't want to restore the PV itself, we + // want to dynamically re-provision it. return warnings, errs, itemExists - } - obj = resetVolumeBindingInfo(obj) - // We call the pvRestorer here to clear out the PV's claimRef.UID, - // so it can be re-claimed when its PVC is restored and gets a new UID. - updatedObj, err := ctx.pvRestorer.executePVAction(obj) - if err != nil { - errs.Add(namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err)) + + case hasDeleteReclaimPolicy(obj.Object): + restoreLogger.Infof("Dynamically re-provisioning persistent volume because it doesn't have a snapshot and its reclaim policy is Delete.") + ctx.pvsToProvision.Insert(name) + + // Return early because we don't want to restore the PV itself, we + // want to dynamically re-provision it. return warnings, errs, itemExists + + default: + obj, err = ctx.handleSkippedPVHasRetainPolicy(obj, resourceID, restoreLogger) + if err != nil { + errs.Add(namespace, err) + return warnings, errs, itemExists + } } - obj = updatedObj } } @@ -2491,3 +2431,105 @@ func (ctx *restoreContext) processUpdateResourcePolicy(fromCluster, fromClusterW } return warnings, errs } + +func (ctx *restoreContext) handlePVHasNativeSnapshot(obj *unstructured.Unstructured, resourceClient client.Dynamic) (*unstructured.Unstructured, error) { + retObj := obj.DeepCopy() + oldName := obj.GetName() + shouldRenamePV, err := shouldRenamePV(ctx, retObj, resourceClient) + if err != nil { + return nil, err + } + + // Check to see if the claimRef.namespace field needs to be remapped, + // and do so if necessary. + _, err = remapClaimRefNS(ctx, retObj) + if err != nil { + return nil, err + } + + var shouldRestoreSnapshot bool + if !shouldRenamePV { + // Check if the PV exists in the cluster before attempting to create + // a volume from the snapshot, in order to avoid orphaned volumes (GH #609) + shouldRestoreSnapshot, err = ctx.shouldRestore(oldName, resourceClient) + if err != nil { + return nil, errors.Wrapf(err, "error waiting on in-cluster persistentvolume %s", oldName) + } + } else { + // If we're renaming the PV, we're going to give it a new random name, + // so we can assume it doesn't already exist in the cluster and therefore + // we should proceed with restoring from snapshot. + shouldRestoreSnapshot = true + } + + if shouldRestoreSnapshot { + // Reset the PV's binding status so that Kubernetes can properly + // associate it with the restored PVC. + retObj = resetVolumeBindingInfo(retObj) + + // Even if we're renaming the PV, obj still has the old name here, because the pvRestorer + // uses the original name to look up metadata about the snapshot. + ctx.log.Infof("Restoring persistent volume from snapshot.") + retObj, err = ctx.pvRestorer.executePVAction(retObj) + if err != nil { + return nil, fmt.Errorf("error executing PVAction for %s: %v", getResourceID(kuberesource.PersistentVolumes, "", oldName), err) + } + + // VolumeSnapshotter has modified the PV name, we should rename the PV. 
+ if oldName != retObj.GetName() { + shouldRenamePV = true + } + } + + if shouldRenamePV { + var pvName string + if oldName == retObj.GetName() { + // pvRestorer hasn't modified the PV name, we need to rename the PV. + pvName, err = ctx.pvRenamer(oldName) + if err != nil { + return nil, errors.Wrapf(err, "error renaming PV") + } + } else { + // VolumeSnapshotter could have modified the PV name through + // function `SetVolumeID`, + pvName = retObj.GetName() + } + + ctx.renamedPVs[oldName] = pvName + retObj.SetName(pvName) + + // Add the original PV name as an annotation. + annotations := retObj.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + annotations["velero.io/original-pv-name"] = oldName + retObj.SetAnnotations(annotations) + } + + return retObj, nil +} + +func (ctx *restoreContext) handleSkippedPVHasRetainPolicy( + obj *unstructured.Unstructured, + resourceID string, + logger logrus.FieldLogger, +) (*unstructured.Unstructured, error) { + logger.Infof("Restoring persistent volume as-is because it doesn't have a snapshot and its reclaim policy is not Delete.") + + // Check to see if the claimRef.namespace field needs to be remapped, and do so if necessary. + if _, err := remapClaimRefNS(ctx, obj); err != nil { + return nil, err + } + + obj = resetVolumeBindingInfo(obj) + + // We call the pvRestorer here to clear out the PV's claimRef.UID, + // so it can be re-claimed when its PVC is restored and gets a new UID. + updatedObj, err := ctx.pvRestorer.executePVAction(obj) + if err != nil { + return nil, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err) + } + + return updatedObj, nil +} diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index d2f86e3c7..65cfbf5a9 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -42,11 +42,13 @@ import ( "k8s.io/client-go/dynamic" kubetesting "k8s.io/client-go/testing" + internalVolume "github.com/vmware-tanzu/velero/internal/volume" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/archive" "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/client" "github.com/vmware-tanzu/velero/pkg/discovery" + "github.com/vmware-tanzu/velero/pkg/features" verifiermocks "github.com/vmware-tanzu/velero/pkg/features/mocks" "github.com/vmware-tanzu/velero/pkg/itemoperation" "github.com/vmware-tanzu/velero/pkg/kuberesource" @@ -61,6 +63,199 @@ import ( "github.com/vmware-tanzu/velero/pkg/volume" ) +func TestRestorePVWithVolumeInfo(t *testing.T) { + tests := []struct { + name string + restore *velerov1api.Restore + backup *velerov1api.Backup + apiResources []*test.APIResource + tarball io.Reader + want map[*test.APIResource][]string + volumeInfoMap map[string]internalVolume.VolumeInfo + }{ + { + name: "Restore PV with native snapshot", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: test.NewTarWriter(t). 
+ AddItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).Result(), + ).Done(), + apiResources: []*test.APIResource{ + test.PVs(), + }, + volumeInfoMap: map[string]internalVolume.VolumeInfo{ + "pv-1": { + BackupMethod: internalVolume.NativeSnapshot, + PVName: "pv-1", + NativeSnapshotInfo: internalVolume.NativeSnapshotInfo{ + SnapshotHandle: "testSnapshotHandle", + }, + }, + }, + want: map[*test.APIResource][]string{ + test.PVs(): {"/pv-1"}, + }, + }, + { + name: "Restore PV with PVB", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: test.NewTarWriter(t). + AddItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).Result(), + ).Done(), + apiResources: []*test.APIResource{ + test.PVs(), + }, + volumeInfoMap: map[string]internalVolume.VolumeInfo{ + "pv-1": { + BackupMethod: internalVolume.PodVolumeBackup, + PVName: "pv-1", + PVBInfo: internalVolume.PodVolumeBackupInfo{ + SnapshotHandle: "testSnapshotHandle", + Size: 100, + NodeName: "testNode", + }, + }, + }, + want: map[*test.APIResource][]string{ + test.PVs(): {}, + }, + }, + { + name: "Restore PV with CSI VolumeSnapshot", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: test.NewTarWriter(t). + AddItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).Result(), + ).Done(), + apiResources: []*test.APIResource{ + test.PVs(), + }, + volumeInfoMap: map[string]internalVolume.VolumeInfo{ + "pv-1": { + BackupMethod: internalVolume.CSISnapshot, + SnapshotDataMoved: false, + PVName: "pv-1", + CSISnapshotInfo: internalVolume.CSISnapshotInfo{ + Driver: "pd.csi.storage.gke.io", + }, + }, + }, + want: map[*test.APIResource][]string{ + test.PVs(): {}, + }, + }, + { + name: "Restore PV with DataUpload", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: test.NewTarWriter(t). + AddItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).Result(), + ).Done(), + apiResources: []*test.APIResource{ + test.PVs(), + }, + volumeInfoMap: map[string]internalVolume.VolumeInfo{ + "pv-1": { + BackupMethod: internalVolume.CSISnapshot, + SnapshotDataMoved: true, + PVName: "pv-1", + CSISnapshotInfo: internalVolume.CSISnapshotInfo{ + Driver: "pd.csi.storage.gke.io", + }, + SnapshotDataMovementInfo: internalVolume.SnapshotDataMovementInfo{ + DataMover: "velero", + }, + }, + }, + want: map[*test.APIResource][]string{ + test.PVs(): {}, + }, + }, + { + name: "Restore PV with ClaimPolicy as Delete", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: test.NewTarWriter(t). + AddItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).Result(), + ).Done(), + apiResources: []*test.APIResource{ + test.PVs(), + }, + volumeInfoMap: map[string]internalVolume.VolumeInfo{ + "pv-1": { + PVName: "pv-1", + Skipped: true, + }, + }, + want: map[*test.APIResource][]string{ + test.PVs(): {}, + }, + }, + { + name: "Restore PV with ClaimPolicy as Retain", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: test.NewTarWriter(t). 
+ AddItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).Result(), + ).Done(), + apiResources: []*test.APIResource{ + test.PVs(), + }, + volumeInfoMap: map[string]internalVolume.VolumeInfo{ + "pv-1": { + PVName: "pv-1", + Skipped: true, + }, + }, + want: map[*test.APIResource][]string{ + test.PVs(): {"/pv-1"}, + }, + }, + } + + features.Enable("EnableCSI") + finder := new(verifiermocks.PluginFinder) + finder.On("Find", mock.Anything, mock.Anything).Return(true) + verifier := features.NewVerifier(finder) + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newHarness(t) + + for _, r := range tc.apiResources { + h.DiscoveryClient.WithAPIResource(r) + } + require.NoError(t, h.restorer.discoveryHelper.Refresh()) + h.restorer.featureVerifier = verifier + + data := &Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: tc.tarball, + VolumeInfoMap: tc.volumeInfoMap, + } + warnings, errs := h.restorer.Restore( + data, + nil, // restoreItemActions + nil, // volume snapshotter getter + ) + + assertEmptyResults(t, warnings, errs) + assertAPIContents(t, h, tc.want) + }) + } +} + // TestRestoreResourceFiltering runs restores with different combinations // of resource filters (included/excluded resources, included/excluded // namespaces, label selectors, "include cluster resources" flag), and diff --git a/pkg/util/kube/list_watch_test.go b/pkg/util/kube/list_watch_test.go index cf7056f5f..8f70e3751 100644 --- a/pkg/util/kube/list_watch_test.go +++ b/pkg/util/kube/list_watch_test.go @@ -26,7 +26,6 @@ import ( kbclient "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - cmdtest "github.com/vmware-tanzu/velero/pkg/cmd/test" velerotest "github.com/vmware-tanzu/velero/pkg/test" ) @@ -35,7 +34,7 @@ func TestInternalLW(t *testing.T) { client := velerotest.NewFakeControllerRuntimeClient(t).(kbclient.WithWatch) lw := InternalLW{ Client: client, - Namespace: cmdtest.VeleroNameSpace, + Namespace: "velero", ObjectList: new(velerov1api.BackupList), } diff --git a/pkg/util/kube/pod.go b/pkg/util/kube/pod.go index c1464a3d6..d93657878 100644 --- a/pkg/util/kube/pod.go +++ b/pkg/util/kube/pod.go @@ -17,6 +17,7 @@ package kube import ( "context" + "fmt" "time" "github.com/pkg/errors" @@ -110,3 +111,23 @@ func EnsureDeletePod(ctx context.Context, podGetter corev1client.CoreV1Interface return nil } + +// IsPodUnrecoverable checks if the pod is in an abnormal state and could not be recovered +// It could not cover all the cases but we could add more cases in the future +func IsPodUnrecoverable(pod *corev1api.Pod, log logrus.FieldLogger) (bool, string) { + // Check the Phase field + if pod.Status.Phase == corev1api.PodFailed || pod.Status.Phase == corev1api.PodUnknown { + log.Warnf("Pod is in abnormal state %s", pod.Status.Phase) + return true, fmt.Sprintf("Pod is in abnormal state %s", pod.Status.Phase) + } + + // Check the Status field + for _, containerStatus := range pod.Status.ContainerStatuses { + // If the container's image state is ImagePullBackOff, it indicates an image pull failure + if containerStatus.State.Waiting != nil && (containerStatus.State.Waiting.Reason == "ImagePullBackOff" || containerStatus.State.Waiting.Reason == "ErrImageNeverPull") { + log.Warnf("Container %s in Pod %s/%s is in pull image failed with reason %s", containerStatus.Name, pod.Namespace, pod.Name, 
diff --git a/pkg/util/kube/pod_test.go b/pkg/util/kube/pod_test.go
index 6f39c0b23..f1cdac043 100644
--- a/pkg/util/kube/pod_test.go
+++ b/pkg/util/kube/pod_test.go
@@ -343,3 +343,82 @@ func TestDeletePodIfAny(t *testing.T) {
 		})
 	}
 }
+
+func TestIsPodUnrecoverable(t *testing.T) {
+	tests := []struct {
+		name string
+		pod  *corev1api.Pod
+		want bool
+	}{
+		{
+			name: "pod is in failed state",
+			pod: &corev1api.Pod{
+				Status: corev1api.PodStatus{
+					Phase: corev1api.PodFailed,
+				},
+			},
+			want: true,
+		},
+		{
+			name: "pod is in unknown state",
+			pod: &corev1api.Pod{
+				Status: corev1api.PodStatus{
+					Phase: corev1api.PodUnknown,
+				},
+			},
+			want: true,
+		},
+		{
+			name: "container image pull failure",
+			pod: &corev1api.Pod{
+				Status: corev1api.PodStatus{
+					ContainerStatuses: []corev1api.ContainerStatus{
+						{State: corev1api.ContainerState{Waiting: &corev1api.ContainerStateWaiting{Reason: "ImagePullBackOff"}}},
+					},
+				},
+			},
+			want: true,
+		},
+		{
+			name: "container image pull failure with different reason",
+			pod: &corev1api.Pod{
+				Status: corev1api.PodStatus{
+					ContainerStatuses: []corev1api.ContainerStatus{
+						{State: corev1api.ContainerState{Waiting: &corev1api.ContainerStateWaiting{Reason: "ErrImageNeverPull"}}},
+					},
+				},
+			},
+			want: true,
+		},
+		{
+			name: "container waiting with unrelated reason",
+			pod: &corev1api.Pod{
+				Status: corev1api.PodStatus{
+					ContainerStatuses: []corev1api.ContainerStatus{
+						{State: corev1api.ContainerState{Waiting: &corev1api.ContainerStateWaiting{Reason: "OtherReason"}}},
+					},
+				},
+			},
+			want: false,
+		},
+		{
+			name: "pod is normal",
+			pod: &corev1api.Pod{
+				Status: corev1api.PodStatus{
+					Phase: corev1api.PodRunning,
+					ContainerStatuses: []corev1api.ContainerStatus{
+						{Ready: true, State: corev1api.ContainerState{Running: &corev1api.ContainerStateRunning{}}},
+					},
+				},
+			},
+			want: false,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			got, _ := IsPodUnrecoverable(test.pod, velerotest.NewLogger())
+			assert.Equal(t, test.want, got)
+		})
+	}
+}
diff --git a/pkg/util/logging/log_counter_hook.go b/pkg/util/logging/log_counter_hook.go
index 7ee37e660..63c9342df 100644
--- a/pkg/util/logging/log_counter_hook.go
+++ b/pkg/util/logging/log_counter_hook.go
@@ -60,8 +60,11 @@ func (h *LogHook) Fire(entry *logrus.Entry) error {
 	namespace, isNamespacePresent := entry.Data["namespace"]
 	errorField, isErrorFieldPresent := entry.Data["error"]
+	// When JSON logging format is enabled, the error message is placed at "error.message" instead of "error"
+	errorMsgField, isErrorMsgFieldPresent := entry.Data["error.message"]
 	resourceField, isResourceFieldPresent := entry.Data["resource"]
 	nameField, isNameFieldPresent := entry.Data["name"]
+	msgField, isMsgFieldPresent := entry.Message, true

 	entryMessage := ""
 	if isResourceFieldPresent {
@@ -70,9 +73,15 @@ func (h *LogHook) Fire(entry *logrus.Entry) error {
 	if isNameFieldPresent {
 		entryMessage = fmt.Sprintf("%s name: /%s", entryMessage, nameField.(string))
 	}
+	if isMsgFieldPresent {
+		entryMessage = fmt.Sprintf("%s message: /%v", entryMessage, msgField)
+	}
 	if isErrorFieldPresent {
 		entryMessage = fmt.Sprintf("%s error: /%v", entryMessage, errorField)
 	}
+	if isErrorMsgFieldPresent {
+		entryMessage = fmt.Sprintf("%s error: /%v", entryMessage,
errorMsgField) + } if isNamespacePresent { h.entries[entry.Level].Add(namespace.(string), errors.New(entryMessage)) diff --git a/pkg/util/logging/log_counter_hook_test.go b/pkg/util/logging/log_counter_hook_test.go new file mode 100644 index 000000000..204354314 --- /dev/null +++ b/pkg/util/logging/log_counter_hook_test.go @@ -0,0 +1,135 @@ +package logging + +import ( + "errors" + "fmt" + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + + "github.com/vmware-tanzu/velero/pkg/util/results" +) + +func TestLogHook_Fire(t *testing.T) { + hook := NewLogHook() + + entry := &logrus.Entry{ + Level: logrus.ErrorLevel, + Data: logrus.Fields{ + "namespace": "test-namespace", + "error": errors.New("test-error"), + "resource": "test-resource", + "name": "test-name", + }, + Message: "test-message", + } + + err := hook.Fire(entry) + assert.NoError(t, err) + + // Verify the counts + assert.Equal(t, 1, hook.counts[logrus.ErrorLevel]) + + // Verify the entries + expectedResult := &results.Result{} + expectedResult.Add("test-namespace", fmt.Errorf(" resource: /test-resource name: /test-name message: /test-message error: /%v", entry.Data["error"])) + assert.Equal(t, expectedResult, hook.entries[logrus.ErrorLevel]) + + entry1 := &logrus.Entry{ + Level: logrus.ErrorLevel, + Data: logrus.Fields{ + "error.message": errors.New("test-error"), + "resource": "test-resource", + "name": "test-name", + }, + Message: "test-message", + } + + err = hook.Fire(entry1) + assert.NoError(t, err) + + // Verify the counts + assert.Equal(t, 2, hook.counts[logrus.ErrorLevel]) + + // Verify the entries + expectedResult = &results.Result{} + expectedResult.Add("test-namespace", fmt.Errorf(" resource: /test-resource name: /test-name message: /test-message error: /%v", entry.Data["error"])) + expectedResult.AddVeleroError(fmt.Errorf(" resource: /test-resource name: /test-name message: /test-message error: /%v", entry1.Data["error.message"])) + assert.Equal(t, expectedResult, hook.entries[logrus.ErrorLevel]) +} + +func TestLogHook_Levels(t *testing.T) { + hook := NewLogHook() + + levels := hook.Levels() + + expectedLevels := []logrus.Level{ + logrus.PanicLevel, + logrus.FatalLevel, + logrus.ErrorLevel, + logrus.WarnLevel, + logrus.InfoLevel, + logrus.DebugLevel, + logrus.TraceLevel, + } + + assert.Equal(t, expectedLevels, levels) +} + +func TestLogHook_GetCount(t *testing.T) { + hook := NewLogHook() + + // Set up test data + hook.counts[logrus.ErrorLevel] = 5 + hook.counts[logrus.WarnLevel] = 10 + + // Test GetCount for ErrorLevel + count := hook.GetCount(logrus.ErrorLevel) + assert.Equal(t, 5, count) + + // Test GetCount for WarnLevel + count = hook.GetCount(logrus.WarnLevel) + assert.Equal(t, 10, count) + + // Test GetCount for other levels + count = hook.GetCount(logrus.InfoLevel) + assert.Equal(t, 0, count) + + count = hook.GetCount(logrus.DebugLevel) + assert.Equal(t, 0, count) +} + +func TestLogHook_GetEntries(t *testing.T) { + hook := NewLogHook() + + // Set up test data + entry := &logrus.Entry{ + Level: logrus.ErrorLevel, + Data: logrus.Fields{ + "namespace": "test-namespace", + "error": errors.New("test-error"), + "resource": "test-resource", + "name": "test-name", + }, + Message: "test-message", + } + expectedResult := &results.Result{} + expectedResult.Add("test-namespace", fmt.Errorf(" resource: /test-resource name: /test-name message: /test-message error: /%v", entry.Data["error"])) + hook.entries[logrus.ErrorLevel] = expectedResult + + // Test GetEntries for ErrorLevel + result := 
hook.GetEntries(logrus.ErrorLevel) + assert.Equal(t, *expectedResult, result) + + // Test GetEntries for WarnLevel + result = hook.GetEntries(logrus.WarnLevel) + assert.Equal(t, results.Result{}, result) + + // Test GetEntries for other levels + result = hook.GetEntries(logrus.InfoLevel) + assert.Equal(t, results.Result{}, result) + + result = hook.GetEntries(logrus.DebugLevel) + assert.Equal(t, results.Result{}, result) +} diff --git a/pkg/volume/volume_info_common.go b/pkg/volume/volume_info_common.go deleted file mode 100644 index 14ede0c6b..000000000 --- a/pkg/volume/volume_info_common.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Copyright 2018 the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package volume - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -type VolumeBackupMethod string - -const ( - NativeSnapshot VolumeBackupMethod = "NativeSnapshot" - PodVolumeBackup VolumeBackupMethod = "PodVolumeBackup" - CSISnapshot VolumeBackupMethod = "CSISnapshot" -) - -type VolumeInfoVersion struct { - Version string `json:"version"` -} - -type VolumeInfos struct { - VolumeInfos []VolumeInfo `json:"volumeInfos"` -} - -type VolumeInfo struct { - // The PVC's name. - PVCName string `json:"pvcName,omitempty"` - - // The PVC's namespace - PVCNamespace string `json:"pvcNamespace,omitempty"` - - // The PV name. - PVName string `json:"pvName,omitempty"` - - // The way the volume data is backed up. The valid value includes `VeleroNativeSnapshot`, `PodVolumeBackup` and `CSISnapshot`. - BackupMethod VolumeBackupMethod `json:"backupMethod,omitempty"` - - // Whether the volume's snapshot data is moved to specified storage. - SnapshotDataMoved bool `json:"snapshotDataMoved"` - - // Whether the local snapshot is preserved after snapshot is moved. - // The local snapshot may be a result of CSI snapshot backup(no data movement) - // or a CSI snapshot data movement plus preserve local snapshot. - PreserveLocalSnapshot bool `json:"preserveLocalSnapshot"` - - // Whether the Volume is skipped in this backup. - Skipped bool `json:"skipped"` - - // The reason for the volume is skipped in the backup. - SkippedReason string `json:"skippedReason,omitempty"` - - // Snapshot starts timestamp. - StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` - - // The Async Operation's ID. - OperationID string `json:"operationID,omitempty"` - - CSISnapshotInfo CSISnapshotInfo `json:"csiSnapshotInfo,omitempty"` - SnapshotDataMovementInfo SnapshotDataMovementInfo `json:"snapshotDataMovementInfo,omitempty"` - NativeSnapshotInfo NativeSnapshotInfo `json:"nativeSnapshotInfo,omitempty"` - PVBInfo PodVolumeBackupInfo `json:"pvbInfo,omitempty"` - PVInfo PVInfo `json:"pvInfo,omitempty"` -} - -// CSISnapshotInfo is used for displaying the CSI snapshot status -type CSISnapshotInfo struct { - // It's the storage provider's snapshot ID for CSI. - SnapshotHandle string `json:"snapshotHandle"` - - // The snapshot corresponding volume size. - Size int64 `json:"size"` - - // The name of the CSI driver. 
- Driver string `json:"driver"` - - // The name of the VolumeSnapshotContent. - VSCName string `json:"vscName"` -} - -// SnapshotDataMovementInfo is used for displaying the snapshot data mover status. -type SnapshotDataMovementInfo struct { - // The data mover used by the backup. The valid values are `velero` and ``(equals to `velero`). - DataMover string `json:"dataMover"` - - // The type of the uploader that uploads the snapshot data. The valid values are `kopia` and `restic`. - UploaderType string `json:"uploaderType"` - - // The name or ID of the snapshot associated object(SAO). - // SAO is used to support local snapshots for the snapshot data mover, - // e.g. it could be a VolumeSnapshot for CSI snapshot data movement. - RetainedSnapshot string `json:"retainedSnapshot"` - - // It's the filesystem repository's snapshot ID. - SnapshotHandle string `json:"snapshotHandle"` -} - -// NativeSnapshotInfo is used for displaying the Velero native snapshot status. -// A Velero Native Snapshot is a cloud storage snapshot taken by the Velero native -// plugins, e.g. velero-plugin-for-aws, velero-plugin-for-gcp, and -// velero-plugin-for-microsoft-azure. -type NativeSnapshotInfo struct { - // It's the storage provider's snapshot ID for the Velero-native snapshot. - SnapshotHandle string `json:"snapshotHandle"` - - // The cloud provider snapshot volume type. - VolumeType string `json:"volumeType"` - - // The cloud provider snapshot volume's availability zones. - VolumeAZ string `json:"volumeAZ"` - - // The cloud provider snapshot volume's IOPS. - IOPS string `json:"iops"` -} - -// PodVolumeBackupInfo is used for displaying the PodVolumeBackup snapshot status. -type PodVolumeBackupInfo struct { - // It's the file-system uploader's snapshot ID for PodVolumeBackup. - SnapshotHandle string `json:"snapshotHandle"` - - // The snapshot corresponding volume size. - Size int64 `json:"size"` - - // The type of the uploader that uploads the data. The valid values are `kopia` and `restic`. - UploaderType string `json:"uploaderType"` - - // The PVC's corresponding volume name used by Pod - // https://github.com/kubernetes/kubernetes/blob/e4b74dd12fa8cb63c174091d5536a10b8ec19d34/pkg/apis/core/types.go#L48 - VolumeName string `json:"volumeName"` - - // The Pod name mounting this PVC. - PodName string `json:"podName"` - - // The Pod namespace - PodNamespace string `json:"podNamespace"` - - // The PVB-taken k8s node's name. - NodeName string `json:"nodeName"` -} - -// PVInfo is used to store some PV information modified after creation. -// Those information are lost after PV recreation. -type PVInfo struct { - // ReclaimPolicy of PV. It could be different from the referenced StorageClass. - ReclaimPolicy string `json:"reclaimPolicy"` - - // The PV's labels should be kept after recreation. - Labels map[string]string `json:"labels"` -}
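With pkg/volume/volume_info_common.go deleted, the VolumeInfo types live under internal/volume, and GetBackupVolumeInfos now decodes the metadata file as a plain JSON array instead of the removed VolumeInfos wrapper. A hedged decoding sketch, assuming the relocated type keeps the JSON tags shown in the deleted file:

```
// Hedged sketch: decoding the new volumeinfo metadata format. Unknown keys
// are ignored by encoding/json, matching the updated object_store_test.go.
package main

import (
	"encoding/json"
	"fmt"

	internalVolume "github.com/vmware-tanzu/velero/internal/volume"
)

func main() {
	// The file now holds a plain JSON array, not {"volumeInfos": [...]}.
	data := []byte(`[{"pvcName": "pvcName", "pvName": "pvName", "skipped": true}]`)

	var volumeInfos []*internalVolume.VolumeInfo
	if err := json.Unmarshal(data, &volumeInfos); err != nil {
		panic(err)
	}
	for _, info := range volumeInfos {
		fmt.Printf("PV %s (PVC %s) skipped=%v\n", info.PVName, info.PVCName, info.Skipped)
	}
}
```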