From fcad46ccdf2aa8505353e9cd09cea03e4aad33b6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Wenkai=20Yin=28=E5=B0=B9=E6=96=87=E5=BC=80=29?=
Date: Tue, 11 Jan 2022 14:47:57 +0800
Subject: [PATCH] Check whether the volume is provisioned by CSI driver or not
 by the annotation as well
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Check whether the volume is provisioned by CSI driver or not by the annotation as well

Fixes #4496

Signed-off-by: Wenkai Yin(尹文开)
---
 Tiltfile                             |  2 +-
 changelogs/unreleased/4513-ywk253100 |  1 +
 pkg/cmd/cli/restic/server.go         |  2 +
 .../pod_volume_backup_controller.go  |  2 +-
 .../pod_volume_restore_controller.go |  2 +-
 pkg/restore/restore.go               | 12 +----
 pkg/restore/restore_test.go          | 13 +++---
 pkg/util/kube/utils.go               | 45 ++++++++++++++++++-
 pkg/util/kube/utils_test.go          | 15 ++++++-
 9 files changed, 72 insertions(+), 22 deletions(-)
 create mode 100644 changelogs/unreleased/4513-ywk253100

diff --git a/Tiltfile b/Tiltfile
index 6ea745531..ba36e3346 100644
--- a/Tiltfile
+++ b/Tiltfile
@@ -103,7 +103,7 @@ local_resource(
 
 local_resource(
     "restic_binary",
-    cmd = 'cd ' + '.' + ';mkdir -p _tiltbuild/restic; BIN=velero GOOS=' + local_goos + ' GOARCH=amd64 RESTIC_VERSION=0.12.0 OUTPUT_DIR=_tiltbuild/restic ./hack/download-restic.sh',
+    cmd = 'cd ' + '.' + ';mkdir -p _tiltbuild/restic; BIN=velero GOOS=linux GOARCH=amd64 RESTIC_VERSION=0.12.0 OUTPUT_DIR=_tiltbuild/restic ./hack/download-restic.sh',
 )
 
 # Note: we need a distro with a bash shell to exec into the Velero container
diff --git a/changelogs/unreleased/4513-ywk253100 b/changelogs/unreleased/4513-ywk253100
new file mode 100644
index 000000000..62118cb61
--- /dev/null
+++ b/changelogs/unreleased/4513-ywk253100
@@ -0,0 +1 @@
+Check whether the volume is provisioned by CSI driver or not by the annotation as well
\ No newline at end of file
diff --git a/pkg/cmd/cli/restic/server.go b/pkg/cmd/cli/restic/server.go
index 4de389bf9..5f8fd526b 100644
--- a/pkg/cmd/cli/restic/server.go
+++ b/pkg/cmd/cli/restic/server.go
@@ -32,6 +32,7 @@ import (
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	v1 "k8s.io/api/core/v1"
+	storagev1api "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -151,6 +152,7 @@ func newResticServer(logger logrus.FieldLogger, factory client.Factory, metricAd
 
 	velerov1api.AddToScheme(scheme)
 	v1.AddToScheme(scheme)
+	storagev1api.AddToScheme(scheme)
 	mgr, err := ctrl.NewManager(clientConfig, ctrl.Options{
 		Scheme: scheme,
 	})
diff --git a/pkg/controller/pod_volume_backup_controller.go b/pkg/controller/pod_volume_backup_controller.go
index ef9c8e4c1..7c92ca187 100644
--- a/pkg/controller/pod_volume_backup_controller.go
+++ b/pkg/controller/pod_volume_backup_controller.go
@@ -208,7 +208,7 @@ func (c *podVolumeBackupController) processBackup(req *velerov1api.PodVolumeBack
 		return c.fail(req, errors.Wrap(err, "error getting pod").Error(), log)
 	}
 
-	volumeDir, err := kube.GetVolumeDirectory(pod, req.Spec.Volume, c.pvcLister, c.pvLister)
+	volumeDir, err := kube.GetVolumeDirectory(log, pod, req.Spec.Volume, c.pvcLister, c.pvLister, c.kbClient)
 	if err != nil {
 		log.WithError(err).Error("Error getting volume directory name")
 		return c.fail(req, errors.Wrap(err, "error getting volume directory name").Error(), log)
diff --git a/pkg/controller/pod_volume_restore_controller.go b/pkg/controller/pod_volume_restore_controller.go
index 3b13cb8a8..e04de5c65 100644
--- a/pkg/controller/pod_volume_restore_controller.go
+++ b/pkg/controller/pod_volume_restore_controller.go
@@ -297,7 +297,7 @@ func (c *podVolumeRestoreController) processRestore(req *velerov1api.PodVolumeRe
 		return c.failRestore(req, errors.Wrap(err, "error getting pod").Error(), log)
 	}
 
-	volumeDir, err := kube.GetVolumeDirectory(pod, req.Spec.Volume, c.pvcLister, c.pvLister)
+	volumeDir, err := kube.GetVolumeDirectory(log, pod, req.Spec.Volume, c.pvcLister, c.pvLister, c.kbClient)
 	if err != nil {
 		log.WithError(err).Error("Error getting volume directory name")
 		return c.failRestore(req, errors.Wrap(err, "error getting volume directory name").Error(), log)
diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go
index 73b880779..7eb8c8136 100644
--- a/pkg/restore/restore.go
+++ b/pkg/restore/restore.go
@@ -67,14 +67,6 @@ import (
 	"github.com/vmware-tanzu/velero/pkg/volume"
 )
 
-// These annotations are taken from the Kubernetes persistent volume/persistent volume claim controller.
-// They cannot be directly importing because they are part of the kubernetes/kubernetes package, and importing that package is unsupported.
-// Their values are well-known and slow changing. They're duplicated here as constants to provide compile-time checking.
-// Originals can be found in kubernetes/kubernetes/pkg/controller/volume/persistentvolume/util/util.go.
-const KubeAnnBindCompleted = "pv.kubernetes.io/bind-completed"
-const KubeAnnBoundByController = "pv.kubernetes.io/bound-by-controller"
-const KubeAnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by"
-
 type VolumeSnapshotterGetter interface {
 	GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error)
 }
@@ -1570,10 +1562,10 @@ func resetVolumeBindingInfo(obj *unstructured.Unstructured) *unstructured.Unstru
 	// Upon restore, this new PV will look like a statically provisioned, manually-
 	// bound volume rather than one bound by the controller, so remove the annotation
 	// that signals that a controller bound it.
-	delete(annotations, KubeAnnBindCompleted)
+	delete(annotations, kube.KubeAnnBindCompleted)
 	// Remove the annotation that signals that the PV is already bound; we want
 	// the PV(C) controller to take the two objects and bind them again.
-	delete(annotations, KubeAnnBoundByController)
+	delete(annotations, kube.KubeAnnBoundByController)
 
 	// GetAnnotations returns a copy, so we have to set them again.
 	obj.SetAnnotations(annotations)
diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go
index 00bc03ec8..a81676085 100644
--- a/pkg/restore/restore_test.go
+++ b/pkg/restore/restore_test.go
@@ -52,6 +52,7 @@ import (
 	resticmocks "github.com/vmware-tanzu/velero/pkg/restic/mocks"
 	"github.com/vmware-tanzu/velero/pkg/test"
 	testutil "github.com/vmware-tanzu/velero/pkg/test"
+	"github.com/vmware-tanzu/velero/pkg/util/kube"
 	kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
 	"github.com/vmware-tanzu/velero/pkg/volume"
 )
@@ -2987,9 +2988,9 @@ func Test_resetVolumeBindingInfo(t *testing.T) {
 			name: "PVs that are bound have their binding and dynamic provisioning annotations removed",
 			obj: NewTestUnstructured().WithMetadataField("kind", "persistentVolume").
WithName("pv-1").WithAnnotations( - KubeAnnBindCompleted, - KubeAnnBoundByController, - KubeAnnDynamicallyProvisioned, + kube.KubeAnnBindCompleted, + kube.KubeAnnBoundByController, + kube.KubeAnnDynamicallyProvisioned, ).WithSpecField("claimRef", map[string]interface{}{ "namespace": "ns-1", "name": "pvc-1", @@ -2997,7 +2998,7 @@ func Test_resetVolumeBindingInfo(t *testing.T) { "resourceVersion": "1"}).Unstructured, expected: NewTestUnstructured().WithMetadataField("kind", "persistentVolume"). WithName("pv-1"). - WithAnnotations(KubeAnnDynamicallyProvisioned). + WithAnnotations(kube.KubeAnnDynamicallyProvisioned). WithSpecField("claimRef", map[string]interface{}{ "namespace": "ns-1", "name": "pvc-1"}).Unstructured, }, @@ -3005,8 +3006,8 @@ func Test_resetVolumeBindingInfo(t *testing.T) { name: "PVCs that are bound have their binding annotations removed, but the volume name stays", obj: NewTestUnstructured().WithMetadataField("kind", "persistentVolumeClaim"). WithName("pvc-1").WithAnnotations( - KubeAnnBindCompleted, - KubeAnnBoundByController, + kube.KubeAnnBindCompleted, + kube.KubeAnnBoundByController, ).WithSpecField("volumeName", "pv-1").Unstructured, expected: NewTestUnstructured().WithMetadataField("kind", "persistentVolumeClaim"). WithName("pvc-1").WithAnnotations(). diff --git a/pkg/util/kube/utils.go b/pkg/util/kube/utils.go index 0dc096566..ccda34ad3 100644 --- a/pkg/util/kube/utils.go +++ b/pkg/util/kube/utils.go @@ -21,8 +21,12 @@ import ( "fmt" "time" + "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" corev1api "k8s.io/api/core/v1" + storagev1api "k8s.io/api/storage/v1" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -34,6 +38,14 @@ import ( corev1listers "k8s.io/client-go/listers/core/v1" ) +// These annotations are taken from the Kubernetes persistent volume/persistent volume claim controller. +// They cannot be directly importing because they are part of the kubernetes/kubernetes package, and importing that package is unsupported. +// Their values are well-known and slow changing. They're duplicated here as constants to provide compile-time checking. +// Originals can be found in kubernetes/kubernetes/pkg/controller/volume/persistentvolume/util/util.go. +const KubeAnnBindCompleted = "pv.kubernetes.io/bind-completed" +const KubeAnnBoundByController = "pv.kubernetes.io/bound-by-controller" +const KubeAnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by" + // NamespaceAndName returns a string in the format / func NamespaceAndName(objMeta metav1.Object) string { if objMeta.GetNamespace() == "" { @@ -105,7 +117,8 @@ func EnsureNamespaceExistsAndIsReady(namespace *corev1api.Namespace, client core // GetVolumeDirectory gets the name of the directory on the host, under /var/lib/kubelet/pods//volumes/, // where the specified volume lives. // For volumes with a CSIVolumeSource, append "/mount" to the directory name. 
-func GetVolumeDirectory(pod *corev1api.Pod, volumeName string, pvcLister corev1listers.PersistentVolumeClaimLister, pvLister corev1listers.PersistentVolumeLister) (string, error) {
+func GetVolumeDirectory(log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, pvcLister corev1listers.PersistentVolumeClaimLister,
+	pvLister corev1listers.PersistentVolumeLister, client client.Client) (string, error) {
 	var volume *corev1api.Volume
 
 	for _, item := range pod.Spec.Volumes {
@@ -140,13 +153,41 @@ func GetVolumeDirectory(pod *corev1api.Pod, volumeName string, pvcLister corev1l
 	}
 
 	// PV's been created with a CSI source.
-	if pv.Spec.CSI != nil {
+	isProvisionedByCSI, err := isProvisionedByCSI(log, pv, client)
+	if err != nil {
+		return "", errors.WithStack(err)
+	}
+	if isProvisionedByCSI {
 		return pvc.Spec.VolumeName + "/mount", nil
 	}
 
 	return pvc.Spec.VolumeName, nil
 }
 
+func isProvisionedByCSI(log logrus.FieldLogger, pv *corev1api.PersistentVolume, kbClient client.Client) (bool, error) {
+	if pv.Spec.CSI != nil {
+		return true, nil
+	}
+	// Even though pv.Spec.CSI is nil, the volume could still be provisioned by a CSI driver when CSI migration is enabled.
+	// Refer to https://github.com/vmware-tanzu/velero/issues/4496 for more details.
+	if pv.Annotations != nil {
+		driverName := pv.Annotations[KubeAnnDynamicallyProvisioned]
+		if len(driverName) > 0 {
+			list := &storagev1api.CSIDriverList{}
+			if err := kbClient.List(context.TODO(), list); err != nil {
+				return false, err
+			}
+			for _, driver := range list.Items {
+				if driverName == driver.Name {
+					log.Debugf("the annotation %s=%s indicates the volume is provisioned by a CSI driver", KubeAnnDynamicallyProvisioned, driverName)
+					return true, nil
+				}
+			}
+		}
+	}
+	return false, nil
+}
+
 // IsV1CRDReady checks a v1 CRD to see if it's ready, with both the Established and NamesAccepted conditions.
 func IsV1CRDReady(crd *apiextv1.CustomResourceDefinition) bool {
 	var isEstablished, namesAccepted bool
diff --git a/pkg/util/kube/utils_test.go b/pkg/util/kube/utils_test.go
index 115e09fea..eea77b110 100644
--- a/pkg/util/kube/utils_test.go
+++ b/pkg/util/kube/utils_test.go
@@ -25,6 +25,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
+	storagev1api "k8s.io/api/storage/v1"
 	apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
@@ -33,6 +34,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	kubeinformers "k8s.io/client-go/informers"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
 	"github.com/vmware-tanzu/velero/pkg/builder"
 	"github.com/vmware-tanzu/velero/pkg/test"
@@ -188,8 +190,19 @@ func TestGetVolumeDirectorySuccess(t *testing.T) {
 			pod:  builder.ForPod("ns-1", "my-pod").Volumes(builder.ForVolume("my-vol").Result()).Result(),
 			want: "my-vol",
 		},
+		{
+			name: "Volume with CSI annotation appends '/mount' to the volume name",
+			pod:  builder.ForPod("ns-1", "my-pod").Volumes(builder.ForVolume("my-vol").PersistentVolumeClaimSource("my-pvc").Result()).Result(),
+			pvc:  builder.ForPersistentVolumeClaim("ns-1", "my-pvc").VolumeName("a-pv").Result(),
+			pv:   builder.ForPersistentVolume("a-pv").ObjectMeta(builder.WithAnnotations(KubeAnnDynamicallyProvisioned, "csi.test.com")).Result(),
+			want: "a-pv/mount",
+		},
 	}
 
+	csiDriver := storagev1api.CSIDriver{
+		ObjectMeta: metav1.ObjectMeta{Name: "csi.test.com"},
+	}
+	kbClient := fake.NewClientBuilder().WithLists(&storagev1api.CSIDriverList{Items: []storagev1api.CSIDriver{csiDriver}}).Build()
 	for _, tc := range tests {
 		h := newHarness(t)
 
@@ -204,7 +217,7 @@
 		}
 
 		// Function under test
-		dir, err := GetVolumeDirectory(tc.pod, tc.pod.Spec.Volumes[0].Name, pvcInformer.Lister(), pvInformer.Lister())
+		dir, err := GetVolumeDirectory(logrus.StandardLogger(), tc.pod, tc.pod.Spec.Volumes[0].Name, pvcInformer.Lister(), pvInformer.Lister(), kbClient)
 		require.NoError(t, err)
 		assert.Equal(t, tc.want, dir)
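
Note on the detection logic above: a PV created through CSI migration of an in-tree plugin has no spec.csi stanza, so the old pv.Spec.CSI != nil test misses it; what identifies it is the pv.kubernetes.io/provisioned-by annotation, cross-checked against the CSIDriver objects installed in the cluster. The following standalone sketch, which is not part of the patch, re-states that check: it assumes the fake client from sigs.k8s.io/controller-runtime, inlines the logic of the unexported isProvisionedByCSI helper, and uses a hypothetical driver name (ebs.csi.aws.com).

package main

import (
	"context"
	"fmt"

	corev1api "k8s.io/api/core/v1"
	storagev1api "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	"github.com/vmware-tanzu/velero/pkg/util/kube"
)

// isCSIMigrated mirrors the patched helper: treat the volume as CSI-backed if
// it has a native CSI source, or if the provisioned-by annotation names a
// driver that is actually installed (a CSIDriver object with that name exists).
func isCSIMigrated(ctx context.Context, c client.Client, pv *corev1api.PersistentVolume) (bool, error) {
	if pv.Spec.CSI != nil {
		return true, nil
	}
	driverName := pv.Annotations[kube.KubeAnnDynamicallyProvisioned]
	if driverName == "" {
		return false, nil
	}
	list := &storagev1api.CSIDriverList{}
	if err := c.List(ctx, list); err != nil {
		return false, err
	}
	for _, driver := range list.Items {
		if driver.Name == driverName {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	// Register storage/v1 with the scheme so the fake client can list CSIDriver
	// objects, mirroring the storagev1api.AddToScheme call the patch adds to the
	// restic server.
	scheme := runtime.NewScheme()
	storagev1api.AddToScheme(scheme)
	kbClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(
		&storagev1api.CSIDriver{ObjectMeta: metav1.ObjectMeta{Name: "ebs.csi.aws.com"}},
	).Build()

	// A migrated in-tree PV: spec.csi is nil, only the annotation names the driver.
	pv := &corev1api.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "a-pv",
			Annotations: map[string]string{kube.KubeAnnDynamicallyProvisioned: "ebs.csi.aws.com"},
		},
	}

	isCSI, err := isCSIMigrated(context.TODO(), kbClient, pv)
	fmt.Println(isCSI, err) // true <nil>, so GetVolumeDirectory would return "a-pv/mount"
}

Cross-checking the annotation against the installed CSIDriver objects, rather than trusting the annotation alone, avoids treating a volume as CSI-backed when the named driver is not actually present in the cluster.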