Merge pull request #4513 from ywk253100/220111_restic

Check whether the volume is provisioned by CSI driver or not by the annotation as well
Daniel Jiang
2022-01-12 19:54:55 +08:00
committed by GitHub
9 changed files with 72 additions and 22 deletions

View File

@@ -103,7 +103,7 @@ local_resource(
 local_resource(
     "restic_binary",
-    cmd = 'cd ' + '.' + ';mkdir -p _tiltbuild/restic; BIN=velero GOOS=' + local_goos + ' GOARCH=amd64 RESTIC_VERSION=0.12.0 OUTPUT_DIR=_tiltbuild/restic ./hack/download-restic.sh',
+    cmd = 'cd ' + '.' + ';mkdir -p _tiltbuild/restic; BIN=velero GOOS=linux GOARCH=amd64 RESTIC_VERSION=0.12.0 OUTPUT_DIR=_tiltbuild/restic ./hack/download-restic.sh',
 )

 # Note: we need a distro with a bash shell to exec into the Velero container

View File

@@ -0,0 +1 @@
+Check whether the volume is provisioned by CSI driver or not by the annotation as well

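For context on the fix: when CSI migration is enabled, a volume can be provisioned by a CSI driver even though the PV still carries its in-tree volume source, so pv.Spec.CSI stays nil. The provisioner records itself in the well-known pv.kubernetes.io/provisioned-by annotation instead. A minimal sketch of such a PV as a Go object (the driver name ebs.csi.aws.com and the volume ID are illustrative assumptions, not values from this change):

package main

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// A PV created under CSI migration: Spec.CSI is nil because the in-tree
// source is retained, but the annotation names the real CSI provisioner.
var migratedPV = &corev1.PersistentVolume{
    ObjectMeta: metav1.ObjectMeta{
        Name: "pv-1",
        Annotations: map[string]string{
            "pv.kubernetes.io/provisioned-by": "ebs.csi.aws.com", // illustrative
        },
    },
    Spec: corev1.PersistentVolumeSpec{
        PersistentVolumeSource: corev1.PersistentVolumeSource{
            AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{
                VolumeID: "vol-0abc123", // illustrative
            },
        },
    },
}

Restic reads volume data under /var/lib/kubelet/pods/<podUID>/volumes/, and CSI volumes add a "/mount" suffix to that directory; checking only pv.Spec.CSI therefore produced the wrong path for PVs like the one above (see https://github.com/vmware-tanzu/velero/issues/4496, referenced in the diff below).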
View File

@@ -32,6 +32,7 @@ import (
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	v1 "k8s.io/api/core/v1"
+	storagev1api "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -151,6 +152,7 @@ func newResticServer(logger logrus.FieldLogger, factory client.Factory, metricAd
 	velerov1api.AddToScheme(scheme)
 	v1.AddToScheme(scheme)
+	storagev1api.AddToScheme(scheme)

 	mgr, err := ctrl.NewManager(clientConfig, ctrl.Options{
 		Scheme: scheme,
 	})

View File

@@ -208,7 +208,7 @@ func (c *podVolumeBackupController) processBackup(req *velerov1api.PodVolumeBack
 		return c.fail(req, errors.Wrap(err, "error getting pod").Error(), log)
 	}

-	volumeDir, err := kube.GetVolumeDirectory(pod, req.Spec.Volume, c.pvcLister, c.pvLister)
+	volumeDir, err := kube.GetVolumeDirectory(log, pod, req.Spec.Volume, c.pvcLister, c.pvLister, c.kbClient)
 	if err != nil {
 		log.WithError(err).Error("Error getting volume directory name")
 		return c.fail(req, errors.Wrap(err, "error getting volume directory name").Error(), log)

View File

@@ -297,7 +297,7 @@ func (c *podVolumeRestoreController) processRestore(req *velerov1api.PodVolumeRe
 		return c.failRestore(req, errors.Wrap(err, "error getting pod").Error(), log)
 	}

-	volumeDir, err := kube.GetVolumeDirectory(pod, req.Spec.Volume, c.pvcLister, c.pvLister)
+	volumeDir, err := kube.GetVolumeDirectory(log, pod, req.Spec.Volume, c.pvcLister, c.pvLister, c.kbClient)
 	if err != nil {
 		log.WithError(err).Error("Error getting volume directory name")
 		return c.failRestore(req, errors.Wrap(err, "error getting volume directory name").Error(), log)

View File

@@ -67,14 +67,6 @@ import (
 	"github.com/vmware-tanzu/velero/pkg/volume"
 )

-// These annotations are taken from the Kubernetes persistent volume/persistent volume claim controller.
-// They cannot be directly imported because they are part of the kubernetes/kubernetes package, and importing that package is unsupported.
-// Their values are well-known and slow-changing. They're duplicated here as constants to provide compile-time checking.
-// Originals can be found in kubernetes/kubernetes/pkg/controller/volume/persistentvolume/util/util.go.
-const KubeAnnBindCompleted = "pv.kubernetes.io/bind-completed"
-const KubeAnnBoundByController = "pv.kubernetes.io/bound-by-controller"
-const KubeAnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by"
-
 type VolumeSnapshotterGetter interface {
 	GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error)
 }
@@ -1570,10 +1562,10 @@ func resetVolumeBindingInfo(obj *unstructured.Unstructured) *unstructured.Unstru
 	// Upon restore, this new PV will look like a statically provisioned, manually-
 	// bound volume rather than one bound by the controller, so remove the annotation
 	// that signals that a controller bound it.
-	delete(annotations, KubeAnnBindCompleted)
+	delete(annotations, kube.KubeAnnBindCompleted)

 	// Remove the annotation that signals that the PV is already bound; we want
 	// the PV(C) controller to take the two objects and bind them again.
-	delete(annotations, KubeAnnBoundByController)
+	delete(annotations, kube.KubeAnnBoundByController)

 	// GetAnnotations returns a copy, so we have to set them again.
 	obj.SetAnnotations(annotations)

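To make the annotation handling above concrete, here is a small sketch (annotation values are illustrative) of what resetVolumeBindingInfo leaves behind. Note that kube.KubeAnnDynamicallyProvisioned is deliberately kept: the CSI detection added in pkg/util/kube below relies on it.

package main

import (
    "fmt"

    "github.com/vmware-tanzu/velero/pkg/util/kube"
)

func main() {
    // Before the reset: a dynamically provisioned PV bound by the controller.
    annotations := map[string]string{
        kube.KubeAnnBindCompleted:          "yes",
        kube.KubeAnnBoundByController:      "yes",
        kube.KubeAnnDynamicallyProvisioned: "ebs.csi.aws.com", // illustrative driver name
    }

    // The two binding annotations are removed, as resetVolumeBindingInfo does above.
    delete(annotations, kube.KubeAnnBindCompleted)
    delete(annotations, kube.KubeAnnBoundByController)

    // Only the provisioner annotation survives, so a restored PV can still be
    // recognized as CSI-provisioned.
    fmt.Println(annotations) // map[pv.kubernetes.io/provisioned-by:ebs.csi.aws.com]
}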
View File

@@ -52,6 +52,7 @@ import (
 	resticmocks "github.com/vmware-tanzu/velero/pkg/restic/mocks"
 	"github.com/vmware-tanzu/velero/pkg/test"
 	testutil "github.com/vmware-tanzu/velero/pkg/test"
+	"github.com/vmware-tanzu/velero/pkg/util/kube"
 	kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube"
 	"github.com/vmware-tanzu/velero/pkg/volume"
 )
@@ -2987,9 +2988,9 @@ func Test_resetVolumeBindingInfo(t *testing.T) {
 			name: "PVs that are bound have their binding and dynamic provisioning annotations removed",
 			obj: NewTestUnstructured().WithMetadataField("kind", "persistentVolume").
 				WithName("pv-1").WithAnnotations(
-				KubeAnnBindCompleted,
-				KubeAnnBoundByController,
-				KubeAnnDynamicallyProvisioned,
+				kube.KubeAnnBindCompleted,
+				kube.KubeAnnBoundByController,
+				kube.KubeAnnDynamicallyProvisioned,
 			).WithSpecField("claimRef", map[string]interface{}{
 				"namespace": "ns-1",
 				"name":      "pvc-1",
@@ -2997,7 +2998,7 @@ func Test_resetVolumeBindingInfo(t *testing.T) {
 				"resourceVersion": "1"}).Unstructured,
 			expected: NewTestUnstructured().WithMetadataField("kind", "persistentVolume").
 				WithName("pv-1").
-				WithAnnotations(KubeAnnDynamicallyProvisioned).
+				WithAnnotations(kube.KubeAnnDynamicallyProvisioned).
 				WithSpecField("claimRef", map[string]interface{}{
 					"namespace": "ns-1", "name": "pvc-1"}).Unstructured,
 		},
@@ -3005,8 +3006,8 @@ func Test_resetVolumeBindingInfo(t *testing.T) {
 			name: "PVCs that are bound have their binding annotations removed, but the volume name stays",
 			obj: NewTestUnstructured().WithMetadataField("kind", "persistentVolumeClaim").
 				WithName("pvc-1").WithAnnotations(
-				KubeAnnBindCompleted,
-				KubeAnnBoundByController,
+				kube.KubeAnnBindCompleted,
+				kube.KubeAnnBoundByController,
 			).WithSpecField("volumeName", "pv-1").Unstructured,
 			expected: NewTestUnstructured().WithMetadataField("kind", "persistentVolumeClaim").
 				WithName("pvc-1").WithAnnotations().
View File

@@ -21,8 +21,12 @@ import (
 	"fmt"
 	"time"

+	"sigs.k8s.io/controller-runtime/pkg/client"
+
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 	corev1api "k8s.io/api/core/v1"
+	storagev1api "k8s.io/api/storage/v1"
 	apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -34,6 +38,14 @@ import (
 	corev1listers "k8s.io/client-go/listers/core/v1"
 )

+// These annotations are taken from the Kubernetes persistent volume/persistent volume claim controller.
+// They cannot be directly imported because they are part of the kubernetes/kubernetes package, and importing that package is unsupported.
+// Their values are well-known and slow-changing. They're duplicated here as constants to provide compile-time checking.
+// Originals can be found in kubernetes/kubernetes/pkg/controller/volume/persistentvolume/util/util.go.
+const KubeAnnBindCompleted = "pv.kubernetes.io/bind-completed"
+const KubeAnnBoundByController = "pv.kubernetes.io/bound-by-controller"
+const KubeAnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by"
+
 // NamespaceAndName returns a string in the format <namespace>/<name>
 func NamespaceAndName(objMeta metav1.Object) string {
 	if objMeta.GetNamespace() == "" {
@@ -105,7 +117,8 @@ func EnsureNamespaceExistsAndIsReady(namespace *corev1api.Namespace, client core
 // GetVolumeDirectory gets the name of the directory on the host, under /var/lib/kubelet/pods/<podUID>/volumes/,
 // where the specified volume lives.
 // For volumes with a CSIVolumeSource, append "/mount" to the directory name.
-func GetVolumeDirectory(pod *corev1api.Pod, volumeName string, pvcLister corev1listers.PersistentVolumeClaimLister, pvLister corev1listers.PersistentVolumeLister) (string, error) {
+func GetVolumeDirectory(log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, pvcLister corev1listers.PersistentVolumeClaimLister,
+	pvLister corev1listers.PersistentVolumeLister, client client.Client) (string, error) {
 	var volume *corev1api.Volume

 	for _, item := range pod.Spec.Volumes {
@@ -140,13 +153,41 @@ func GetVolumeDirectory(pod *corev1api.Pod, volumeName string, pvcLister corev1l
 	}

-	// PV's been created with a CSI source.
-	if pv.Spec.CSI != nil {
+	isProvisionedByCSI, err := isProvisionedByCSI(log, pv, client)
+	if err != nil {
+		return "", errors.WithStack(err)
+	}
+	if isProvisionedByCSI {
 		return pvc.Spec.VolumeName + "/mount", nil
 	}

 	return pvc.Spec.VolumeName, nil
 }

+func isProvisionedByCSI(log logrus.FieldLogger, pv *corev1api.PersistentVolume, kbClient client.Client) (bool, error) {
+	if pv.Spec.CSI != nil {
+		return true, nil
+	}
+	// Although pv.Spec.CSI is nil, the volume could still have been provisioned by a CSI driver if CSI migration is enabled.
+	// Refer to https://github.com/vmware-tanzu/velero/issues/4496 for more details.
+	if pv.Annotations != nil {
+		driverName := pv.Annotations[KubeAnnDynamicallyProvisioned]
+		if len(driverName) > 0 {
+			list := &storagev1api.CSIDriverList{}
+			if err := kbClient.List(context.TODO(), list); err != nil {
+				return false, err
+			}
+			for _, driver := range list.Items {
+				if driverName == driver.Name {
+					log.Debugf("the annotation %s=%s indicates the volume is provisioned by a CSI driver", KubeAnnDynamicallyProvisioned, driverName)
+					return true, nil
+				}
+			}
+		}
+	}
+	return false, nil
+}
+
 // IsV1CRDReady checks a v1 CRD to see if it's ready, with both the Established and NamesAccepted conditions.
 func IsV1CRDReady(crd *apiextv1.CustomResourceDefinition) bool {
 	var isEstablished, namesAccepted bool

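A self-contained sketch of the fallback path in isolation, using controller-runtime's fake client the same way the new test below does (the driver name csi.test.com mirrors the test fixture; the explicit scheme wiring mirrors the storagev1api.AddToScheme call added to the restic server):

package main

import (
    "context"
    "fmt"

    corev1 "k8s.io/api/core/v1"
    storagev1 "k8s.io/api/storage/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
    // Register storage/v1 so the client can list CSIDriver objects.
    scheme := runtime.NewScheme()
    _ = storagev1.AddToScheme(scheme)

    kbClient := fake.NewClientBuilder().WithScheme(scheme).
        WithObjects(&storagev1.CSIDriver{ObjectMeta: metav1.ObjectMeta{Name: "csi.test.com"}}).
        Build()

    // A migrated PV: no CSI source in the spec, but the annotation is set.
    pv := &corev1.PersistentVolume{ObjectMeta: metav1.ObjectMeta{
        Name:        "pv-1",
        Annotations: map[string]string{"pv.kubernetes.io/provisioned-by": "csi.test.com"},
    }}

    // The fallback: match the annotation against the installed CSI drivers.
    drivers := &storagev1.CSIDriverList{}
    if err := kbClient.List(context.TODO(), drivers); err != nil {
        panic(err)
    }
    provisionedByCSI := false
    for _, d := range drivers.Items {
        if d.Name == pv.Annotations["pv.kubernetes.io/provisioned-by"] {
            provisionedByCSI = true
        }
    }
    fmt.Println(provisionedByCSI) // true -> GetVolumeDirectory appends "/mount"
}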
View File

@@ -25,6 +25,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
+	storagev1api "k8s.io/api/storage/v1"
 	apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
@@ -33,6 +34,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	kubeinformers "k8s.io/client-go/informers"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"

 	"github.com/vmware-tanzu/velero/pkg/builder"
 	"github.com/vmware-tanzu/velero/pkg/test"
@@ -188,8 +190,19 @@ func TestGetVolumeDirectorySuccess(t *testing.T) {
 			pod:  builder.ForPod("ns-1", "my-pod").Volumes(builder.ForVolume("my-vol").Result()).Result(),
 			want: "my-vol",
 		},
+		{
+			name: "Volume with CSI annotation appends '/mount' to the volume name",
+			pod:  builder.ForPod("ns-1", "my-pod").Volumes(builder.ForVolume("my-vol").PersistentVolumeClaimSource("my-pvc").Result()).Result(),
+			pvc:  builder.ForPersistentVolumeClaim("ns-1", "my-pvc").VolumeName("a-pv").Result(),
+			pv:   builder.ForPersistentVolume("a-pv").ObjectMeta(builder.WithAnnotations(KubeAnnDynamicallyProvisioned, "csi.test.com")).Result(),
+			want: "a-pv/mount",
+		},
 	}
+	csiDriver := storagev1api.CSIDriver{
+		ObjectMeta: metav1.ObjectMeta{Name: "csi.test.com"},
+	}
+	kbClient := fake.NewClientBuilder().WithLists(&storagev1api.CSIDriverList{Items: []storagev1api.CSIDriver{csiDriver}}).Build()

 	for _, tc := range tests {
 		h := newHarness(t)
@@ -204,7 +217,7 @@ func TestGetVolumeDirectorySuccess(t *testing.T) {
 		}

 		// Function under test
-		dir, err := GetVolumeDirectory(tc.pod, tc.pod.Spec.Volumes[0].Name, pvcInformer.Lister(), pvInformer.Lister())
+		dir, err := GetVolumeDirectory(logrus.StandardLogger(), tc.pod, tc.pod.Spec.Volumes[0].Name, pvcInformer.Lister(), pvInformer.Lister(), kbClient)
 		require.NoError(t, err)
 		assert.Equal(t, tc.want, dir)