mirror of https://github.com/vmware-tanzu/velero.git
synced 2026-01-10 15:07:29 +00:00
chore: define common aliases for k8s packages (#8672)
Some checks failed
Run the E2E test on kind / build (push) Failing after 6m48s
Run the E2E test on kind / setup-test-matrix (push) Successful in 3s
Run the E2E test on kind / run-e2e-test (push) Has been skipped
Main CI / Build (push) Failing after 35s
Close stale issues and PRs / stale (push) Successful in 8s
Trivy Nightly Scan / Trivy nightly scan (velero, main) (push) Failing after 1m11s
Trivy Nightly Scan / Trivy nightly scan (velero-plugin-for-aws, main) (push) Failing after 47s
Trivy Nightly Scan / Trivy nightly scan (velero-plugin-for-gcp, main) (push) Failing after 49s
Trivy Nightly Scan / Trivy nightly scan (velero-plugin-for-microsoft-azure, main) (push) Failing after 43s
* chore: define common aliases for k8s packages
  Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
* Update .golangci.yaml
  Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
* Update .golangci.yaml
  Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
* Update .golangci.yaml
  Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
---------
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
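The diff below shows the Go-side renames; the .golangci.yaml changes that enforce them are not reproduced on this page. As a rough sketch only, an importas rule set like the following (alias names taken from the diff; Velero's actual configuration may differ) would make golangci-lint reject any other spelling of these imports:

# Hypothetical excerpt of a .golangci.yaml using the importas linter.
linters-settings:
  importas:
    # Report any rule-matched package that is imported without its alias.
    no-unaliased: true
    alias:
      - pkg: k8s.io/api/core/v1
        alias: corev1api
      - pkg: k8s.io/api/storage/v1
        alias: storagev1api
      - pkg: k8s.io/apimachinery/pkg/apis/meta/v1
        alias: metav1

With rules like these in place, golangci-lint run ./... flags every file that still imports k8s.io/api/core/v1 as v1 or corev1, which is what drives the mechanical rename in the diff.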
@@ -30,7 +30,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
-	v1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	storagev1api "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
@@ -170,7 +170,7 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
 		cancelFunc()
 		return nil, err
 	}
-	if err := v1.AddToScheme(scheme); err != nil {
+	if err := corev1api.AddToScheme(scheme); err != nil {
 		cancelFunc()
 		return nil, err
 	}
@@ -184,7 +184,7 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
 	// use a field selector to filter to only pods scheduled on this node.
 	cacheOption := cache.Options{
 		ByObject: map[ctrlclient.Object]cache.ByObject{
-			&v1.Pod{}: {
+			&corev1api.Pod{}: {
 				Field: fields.Set{"spec.nodeName": nodeName}.AsSelector(),
 			},
 			&velerov1api.PodVolumeBackup{}: {
@@ -199,7 +199,7 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi
 			&velerov2alpha1api.DataDownload{}: {
 				Field: fields.Set{"metadata.namespace": factory.Namespace()}.AsSelector(),
 			},
-			&v1.Event{}: {
+			&corev1api.Event{}: {
 				Field: fields.Set{"metadata.namespace": factory.Namespace()}.AsSelector(),
 			},
 		},
@@ -328,7 +328,7 @@ func (s *nodeAgentServer) run() {
 		s.logger.Infof("Using customized backupPVC config %v", backupPVCConfig)
 	}
 
-	podResources := v1.ResourceRequirements{}
+	podResources := corev1api.ResourceRequirements{}
 	if s.dataPathConfigs != nil && s.dataPathConfigs.PodResources != nil {
 		if res, err := kube.ParseResourceRequirements(s.dataPathConfigs.PodResources.CPURequest, s.dataPathConfigs.PodResources.MemoryRequest, s.dataPathConfigs.PodResources.CPULimit, s.dataPathConfigs.PodResources.MemoryLimit); err != nil {
 			s.logger.WithError(err).Warn("Pod resource requirements are invalid, ignore")
@@ -391,7 +391,7 @@ func (s *nodeAgentServer) run() {
 }
 
 func (s *nodeAgentServer) waitCacheForResume() error {
-	podInformer, err := s.mgr.GetCache().GetInformer(s.ctx, &v1.Pod{})
+	podInformer, err := s.mgr.GetCache().GetInformer(s.ctx, &corev1api.Pod{})
 	if err != nil {
 		return errors.Wrap(err, "error getting pod informer")
 	}
@@ -444,7 +444,7 @@ func (s *nodeAgentServer) validatePodVolumesHostPath(client kubernetes.Interface
 
 	// if the pod is a mirror pod, the directory name is the hash value of the
 	// mirror pod annotation
-	if hash, ok := pod.GetAnnotations()[v1.MirrorPodAnnotationKey]; ok {
+	if hash, ok := pod.GetAnnotations()[corev1api.MirrorPodAnnotationKey]; ok {
 		dirName = hash
 	}
 
@@ -517,7 +517,7 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) {
 			continue
 		}
 
-		pod := &v1.Pod{}
+		pod := &corev1api.Pod{}
 		if err := client.Get(s.ctx, types.NamespacedName{
 			Namespace: pvr.Spec.Pod.Namespace,
 			Name:      pvr.Spec.Pod.Name,

The same rename is applied in the unit tests for validatePodVolumesHostPath:

@@ -24,7 +24,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
-	corev1 "k8s.io/api/core/v1"
+	corev1api "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes"
@@ -38,14 +38,14 @@ import (
 func Test_validatePodVolumesHostPath(t *testing.T) {
 	tests := []struct {
 		name      string
-		pods      []*corev1.Pod
+		pods      []*corev1api.Pod
 		dirs      []string
 		createDir bool
 		wantErr   bool
 	}{
 		{
 			name: "no error when pod volumes are present",
-			pods: []*corev1.Pod{
+			pods: []*corev1api.Pod{
 				builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
 				builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo")).Result(),
 			},
@@ -55,9 +55,9 @@ func Test_validatePodVolumesHostPath(t *testing.T) {
 		},
 		{
 			name: "no error when pod volumes are present and there are mirror pods",
-			pods: []*corev1.Pod{
+			pods: []*corev1api.Pod{
 				builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
-				builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo"), builder.WithAnnotations(corev1.MirrorPodAnnotationKey, "baz")).Result(),
+				builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo"), builder.WithAnnotations(corev1api.MirrorPodAnnotationKey, "baz")).Result(),
 			},
 			dirs:      []string{"foo", "baz"},
 			createDir: true,
@@ -65,7 +65,7 @@ func Test_validatePodVolumesHostPath(t *testing.T) {
 		},
 		{
 			name: "error when all pod volumes missing",
-			pods: []*corev1.Pod{
+			pods: []*corev1api.Pod{
 				builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
 				builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo")).Result(),
 			},
@@ -75,7 +75,7 @@ func Test_validatePodVolumesHostPath(t *testing.T) {
 		},
 		{
 			name: "error when some pod volumes missing",
-			pods: []*corev1.Pod{
+			pods: []*corev1api.Pod{
 				builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
 				builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo")).Result(),
 			},
@@ -85,7 +85,7 @@ func Test_validatePodVolumesHostPath(t *testing.T) {
 		},
 		{
 			name: "no error when pod volumes are not present",
-			pods: []*corev1.Pod{
+			pods: []*corev1api.Pod{
 				builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(),
 			},
 			dirs: []string{"foo"},
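For illustration only (this snippet is not part of the commit), a minimal standalone program showing the convention the aliases establish: core/v1 types are spelled corev1api everywhere, so the old mix of v1 and corev1 disappears from the tree:

package main

import (
	"fmt"

	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// With the shared alias, a core/v1 Pod reads the same in every file.
	pod := corev1api.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "default"},
	}
	fmt.Printf("%s/%s\n", pod.Namespace, pod.Name)
}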