Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-07 05:46:37 +00:00)
chore: define common aliases for k8s packages (#8672)
Some checks failed
Run the E2E test on kind / build (push) Failing after 6m48s
Run the E2E test on kind / setup-test-matrix (push) Successful in 3s
Run the E2E test on kind / run-e2e-test (push) Has been skipped
Main CI / Build (push) Failing after 35s
Close stale issues and PRs / stale (push) Successful in 8s
Trivy Nightly Scan / Trivy nightly scan (velero, main) (push) Failing after 1m11s
Trivy Nightly Scan / Trivy nightly scan (velero-plugin-for-aws, main) (push) Failing after 47s
Trivy Nightly Scan / Trivy nightly scan (velero-plugin-for-gcp, main) (push) Failing after 49s
Trivy Nightly Scan / Trivy nightly scan (velero-plugin-for-microsoft-azure, main) (push) Failing after 43s
* chore: define common alias for k8s packages
* Update .golangci.yaml
* Update .golangci.yaml
* Update .golangci.yaml

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
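The change itself is mechanical: each k8s.io package gets one canonical import alias (corev1api, appsv1api, storagev1api, metav1, ...) in place of the mix of v1, corev1, and appsv1 used before. Aliases like these are typically enforced with golangci-lint's importas linter, so a plausible sketch of the .golangci.yaml change follows; the exact alias list and options in Velero's config may differ.

    # .golangci.yaml (sketch, assuming the importas linter enforces the aliases)
    linters-settings:
      importas:
        # require the alias: unaliased imports of listed packages are flagged
        no-unaliased: true
        alias:
          - pkg: k8s.io/api/core/v1
            alias: corev1api
          - pkg: k8s.io/api/apps/v1
            alias: appsv1api
          - pkg: k8s.io/api/storage/v1
            alias: storagev1api
          - pkg: k8s.io/apimachinery/pkg/apis/meta/v1
            alias: metav1

With a config like this, golangci-lint flags any file that imports k8s.io/api/core/v1 as v1 or corev1, which is exactly the churn the diff below applies.
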
@@ -26,7 +26,7 @@ import (
 	flag "github.com/spf13/pflag"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
-	v1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	factorymocks "github.com/vmware-tanzu/velero/pkg/client/mocks"
@@ -80,8 +80,8 @@ func TestBuildBackupStorageLocationSetsCredential(t *testing.T) {
 
 	bsl, err = o.BuildBackupStorageLocation("velero-test-ns", false, true)
 	assert.NoError(t, err)
-	assert.Equal(t, &v1.SecretKeySelector{
-		LocalObjectReference: v1.LocalObjectReference{Name: "my-secret"},
+	assert.Equal(t, &corev1api.SecretKeySelector{
+		LocalObjectReference: corev1api.LocalObjectReference{Name: "my-secret"},
 		Key: "key-from-secret",
 	}, bsl.Spec.Credential)
 }

@@ -24,7 +24,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
-	v1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes"
@@ -142,7 +142,7 @@ func newdataMoverBackup(logger logrus.FieldLogger, factory client.Factory, confi
 		return nil, errors.Wrap(err, "error to add velero v2alpha1 scheme")
 	}
 
-	if err := v1.AddToScheme(scheme); err != nil {
+	if err := corev1api.AddToScheme(scheme); err != nil {
 		cancelFunc()
 		return nil, errors.Wrap(err, "error to add core v1 scheme")
 	}
@@ -153,7 +153,7 @@ func newdataMoverBackup(logger logrus.FieldLogger, factory client.Factory, confi
 	cacheOption := ctlcache.Options{
 		Scheme: scheme,
 		ByObject: map[ctlclient.Object]ctlcache.ByObject{
-			&v1.Pod{}: {
+			&corev1api.Pod{}: {
 				Field: fields.Set{"spec.nodeName": nodeName}.AsSelector(),
 			},
 			&velerov2alpha1api.DataUpload{}: {

@@ -24,7 +24,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
-	v1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes"
@@ -134,7 +134,7 @@ func newdataMoverRestore(logger logrus.FieldLogger, factory client.Factory, conf
 		return nil, errors.Wrap(err, "error to add velero v2alpha1 scheme")
 	}
 
-	if err := v1.AddToScheme(scheme); err != nil {
+	if err := corev1api.AddToScheme(scheme); err != nil {
 		cancelFunc()
 		return nil, errors.Wrap(err, "error to add core v1 scheme")
 	}
@@ -145,7 +145,7 @@ func newdataMoverRestore(logger logrus.FieldLogger, factory client.Factory, conf
 	cacheOption := ctlcache.Options{
 		Scheme: scheme,
 		ByObject: map[ctlclient.Object]ctlcache.ByObject{
-			&v1.Pod{}: {
+			&corev1api.Pod{}: {
 				Field: fields.Set{"spec.nodeName": nodeName}.AsSelector(),
 			},
 			&velerov2alpha1api.DataDownload{}: {

@@ -30,7 +30,7 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 	"github.com/vmware-tanzu/crash-diagnostics/exec"
-	appsv1 "k8s.io/api/apps/v1"
+	appsv1api "k8s.io/api/apps/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/tools/clientcmd"
 	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
@@ -117,7 +117,7 @@ func (o *option) validate(f client.Factory) error {
 	if err != nil {
 		return err
 	}
-	deploymentList := new(appsv1.DeploymentList)
+	deploymentList := new(appsv1api.DeploymentList)
 	selector, err := labels.Parse("component=velero")
 	cmd.CheckError(err)
 	err = crClient.List(context.TODO(), deploymentList, &ctrlclient.ListOptions{

@@ -30,7 +30,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
-	v1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	storagev1api "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
@@ -170,7 +170,7 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
 		cancelFunc()
 		return nil, err
 	}
-	if err := v1.AddToScheme(scheme); err != nil {
+	if err := corev1api.AddToScheme(scheme); err != nil {
 		cancelFunc()
 		return nil, err
 	}
@@ -184,7 +184,7 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
 	// use a field selector to filter to only pods scheduled on this node.
 	cacheOption := cache.Options{
 		ByObject: map[ctrlclient.Object]cache.ByObject{
-			&v1.Pod{}: {
+			&corev1api.Pod{}: {
 				Field: fields.Set{"spec.nodeName": nodeName}.AsSelector(),
 			},
 			&velerov1api.PodVolumeBackup{}: {
@@ -199,7 +199,7 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
 			&velerov2alpha1api.DataDownload{}: {
 				Field: fields.Set{"metadata.namespace": factory.Namespace()}.AsSelector(),
 			},
-			&v1.Event{}: {
+			&corev1api.Event{}: {
 				Field: fields.Set{"metadata.namespace": factory.Namespace()}.AsSelector(),
 			},
 		},
@@ -328,7 +328,7 @@ func (s *nodeAgentServer) run() {
 		s.logger.Infof("Using customized backupPVC config %v", backupPVCConfig)
 	}
 
-	podResources := v1.ResourceRequirements{}
+	podResources := corev1api.ResourceRequirements{}
 	if s.dataPathConfigs != nil && s.dataPathConfigs.PodResources != nil {
 		if res, err := kube.ParseResourceRequirements(s.dataPathConfigs.PodResources.CPURequest, s.dataPathConfigs.PodResources.MemoryRequest, s.dataPathConfigs.PodResources.CPULimit, s.dataPathConfigs.PodResources.MemoryLimit); err != nil {
 			s.logger.WithError(err).Warn("Pod resource requirements are invalid, ignore")
@@ -391,7 +391,7 @@ func (s *nodeAgentServer) run() {
 }
 
 func (s *nodeAgentServer) waitCacheForResume() error {
-	podInformer, err := s.mgr.GetCache().GetInformer(s.ctx, &v1.Pod{})
+	podInformer, err := s.mgr.GetCache().GetInformer(s.ctx, &corev1api.Pod{})
 	if err != nil {
 		return errors.Wrap(err, "error getting pod informer")
 	}
@@ -444,7 +444,7 @@ func (s *nodeAgentServer) validatePodVolumesHostPath(client kubernetes.Interface
 
 		// if the pod is a mirror pod, the directory name is the hash value of the
 		// mirror pod annotation
-		if hash, ok := pod.GetAnnotations()[v1.MirrorPodAnnotationKey]; ok {
+		if hash, ok := pod.GetAnnotations()[corev1api.MirrorPodAnnotationKey]; ok {
 			dirName = hash
 		}
 
@@ -517,7 +517,7 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) {
 			continue
 		}
 
		pod := &v1.Pod{}
		pod := &corev1api.Pod{}
 		if err := client.Get(s.ctx, types.NamespacedName{
 			Namespace: pvr.Spec.Pod.Namespace,
 			Name:      pvr.Spec.Pod.Name,

@@ -24,7 +24,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
-	corev1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes"
@@ -38,14 +38,14 @@ import (
 func Test_validatePodVolumesHostPath(t *testing.T) {
 	tests := []struct {
 		name      string
-		pods      []*corev1.Pod
+		pods      []*corev1api.Pod
 		dirs      []string
 		createDir bool
 		wantErr   bool
 	}{
 		{
 			name: "no error when pod volumes are present",
-			pods: []*corev1.Pod{
+			pods: []*corev1api.Pod{
 				builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
 				builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo")).Result(),
 			},
@@ -55,9 +55,9 @@ func Test_validatePodVolumesHostPath(t *testing.T) {
 		},
 		{
 			name: "no error when pod volumes are present and there are mirror pods",
-			pods: []*corev1.Pod{
+			pods: []*corev1api.Pod{
 				builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
-				builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo"), builder.WithAnnotations(corev1.MirrorPodAnnotationKey, "baz")).Result(),
+				builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo"), builder.WithAnnotations(corev1api.MirrorPodAnnotationKey, "baz")).Result(),
 			},
 			dirs:      []string{"foo", "baz"},
 			createDir: true,
@@ -65,7 +65,7 @@ func Test_validatePodVolumesHostPath(t *testing.T) {
 		},
 		{
 			name: "error when all pod volumes missing",
-			pods: []*corev1.Pod{
+			pods: []*corev1api.Pod{
 				builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
 				builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo")).Result(),
 			},
@@ -75,7 +75,7 @@ func Test_validatePodVolumesHostPath(t *testing.T) {
 		},
 		{
 			name: "error when some pod volumes missing",
-			pods: []*corev1.Pod{
+			pods: []*corev1api.Pod{
 				builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
 				builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo")).Result(),
 			},
@@ -85,7 +85,7 @@ func Test_validatePodVolumesHostPath(t *testing.T) {
 		},
 		{
 			name: "no error when pod volumes are not present",
-			pods: []*corev1.Pod{
+			pods: []*corev1api.Pod{
 				builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
 			},
 			dirs: []string{"foo"},

@@ -12,7 +12,7 @@ import (
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
-	v1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes"
 	ctrl "sigs.k8s.io/controller-runtime"
@@ -90,7 +90,7 @@ func (o *Options) initClient(f velerocli.Factory) (client.Client, error) {
 		return nil, errors.Wrap(err, "failed to add velero scheme")
 	}
 
-	err = v1.AddToScheme(scheme)
+	err = corev1api.AddToScheme(scheme)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to add api core scheme")
 	}

@@ -25,7 +25,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
-	corev1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/tools/cache"
@@ -299,12 +299,12 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
 		}
 	}
 
-	var resModifiers *corev1.TypedLocalObjectReference
+	var resModifiers *corev1api.TypedLocalObjectReference
 
 	if o.ResourceModifierConfigMap != "" {
-		resModifiers = &corev1.TypedLocalObjectReference{
+		resModifiers = &corev1api.TypedLocalObjectReference{
 			// Group for core API is ""
-			APIGroup: &corev1.SchemeGroupVersion.Group,
+			APIGroup: &corev1api.SchemeGroupVersion.Group,
 			Kind:     resourcemodifiers.ConfigmapRefType,
 			Name:     o.ResourceModifierConfigMap,
 		}

@@ -23,7 +23,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
-	v1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/vmware-tanzu/velero/internal/resourcepolicies"
@@ -168,7 +168,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
 	}
 
 	if o.BackupOptions.ResPoliciesConfigmap != "" {
-		schedule.Spec.Template.ResourcePolicy = &v1.TypedLocalObjectReference{Kind: resourcepolicies.ConfigmapRefType, Name: o.BackupOptions.ResPoliciesConfigmap}
+		schedule.Spec.Template.ResourcePolicy = &corev1api.TypedLocalObjectReference{Kind: resourcepolicies.ConfigmapRefType, Name: o.BackupOptions.ResPoliciesConfigmap}
 	}
 
 	if o.BackupOptions.ParallelFilesUpload > 0 {

@@ -28,7 +28,7 @@ import (
 	"github.com/spf13/pflag"
 
 	appsv1api "k8s.io/api/apps/v1"
-	corev1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -173,7 +173,7 @@ func Run(ctx context.Context, kbClient kbclient.Client, namespace string) error
 
 func deleteNamespace(ctx context.Context, kbClient kbclient.Client, namespace string) error {
 	// First check if it's already been deleted
-	ns := &corev1.Namespace{}
+	ns := &corev1api.Namespace{}
 	key := kbclient.ObjectKey{Name: namespace}
 	if err := kbClient.Get(ctx, key, ns); err != nil {
 		if apierrors.IsNotFound(err) {