Mirror of https://github.com/vmware-tanzu/velero.git, synced 2026-01-08 14:21:18 +00:00
fix lint loop iterator problem
Signed-off-by: Lyndon-Li <lyonghui@vmware.com>
changelogs/unreleased/5459-lyndon (new file, 1 line added)
@@ -0,0 +1 @@
+Upgrade velero docker image to use go 1.18 and upgrade golangci-lint to 1.45.0
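The hunks below all address the same golangci-lint finding: taking the address of a "for ... range" loop variable (or handing that pointer to something that keeps it) is flagged, because before Go 1.22 the loop variable is a single variable that is overwritten on every iteration, so every saved pointer ends up aliasing the same value. The fix applied throughout this commit is to take the address of the slice element instead (&list.Items[i]). A minimal sketch of the problem and the fix, using made-up Item/ItemList types rather than the Velero API types:

// Minimal sketch of the loop-iterator problem; Item and ItemList are
// illustrative types, not Velero API types.
package main

import "fmt"

type Item struct{ Name string }

type ItemList struct{ Items []Item }

func main() {
	list := ItemList{Items: []Item{{Name: "a"}, {Name: "b"}, {Name: "c"}}}

	// Flagged pattern: &item takes the address of the loop variable, which is
	// reused (and overwritten) on every iteration before Go 1.22.
	var bad []*Item
	for _, item := range list.Items {
		bad = append(bad, &item)
	}

	// Fixed pattern used throughout this commit: index into the backing slice
	// so each pointer refers to a distinct, stable element.
	var good []*Item
	for i := range list.Items {
		good = append(good, &list.Items[i])
	}

	fmt.Println(bad[0].Name, bad[1].Name)   // with Go < 1.22: "c c", all pointers alias the loop variable
	fmt.Println(good[0].Name, good[1].Name) // "a b"
}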
@@ -117,7 +117,7 @@ linters-settings:
# minimal length of string constant, 3 by default
min-len: 3
# minimal occurrences count to trigger, 3 by default
-min-occurrences: 3
+min-occurrences: 5
gocritic:
# Which checks should be enabled; can't be combined with 'disabled-checks';
# See https://go-critic.github.io/overview#checks-overview
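The goconst threshold change above is what allows the next few hunks: with min-occurrences raised from 3 to 5, a string literal that appears only three or four times in a file no longer has to be hoisted into a named constant, which is presumably why the strTrue constant is deleted below and the literal "true" is written inline. A rough illustration of the threshold, not taken from the Velero sources:

// Rough illustration (not Velero code): four occurrences of the same literal.
// With goconst min-occurrences: 5 this passes; with the previous value of 3
// the linter would suggest extracting a constant such as strTrue.
package main

import "fmt"

type flag struct{ NoOptDefVal string }

func main() {
	f1 := flag{NoOptDefVal: "true"}
	f2 := flag{NoOptDefVal: "true"}
	f3 := flag{NoOptDefVal: "true"}
	f4 := flag{NoOptDefVal: "true"}
	fmt.Println(f1, f2, f3, f4)
}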
@@ -112,10 +112,6 @@ func NewCreateOptions() *CreateOptions {
}
}

-const (
-strTrue = "true"
-)
-
func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
flags.DurationVar(&o.TTL, "ttl", o.TTL, "How long before the backup can be garbage collected.")
flags.Var(&o.IncludeNamespaces, "include-namespaces", "Namespaces to include in the backup (use '*' for all namespaces).")
@@ -131,13 +127,13 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
f := flags.VarPF(&o.SnapshotVolumes, "snapshot-volumes", "", "Take snapshots of PersistentVolumes as part of the backup. If the parameter is not set, it is treated as setting to 'true'.")
// this allows the user to just specify "--snapshot-volumes" as shorthand for "--snapshot-volumes=true"
// like a normal bool flag
-f.NoOptDefVal = strTrue
+f.NoOptDefVal = "true"

f = flags.VarPF(&o.IncludeClusterResources, "include-cluster-resources", "", "Include cluster-scoped resources in the backup")
-f.NoOptDefVal = strTrue
+f.NoOptDefVal = "true"

f = flags.VarPF(&o.DefaultVolumesToFsBackup, "default-volumes-to-fs-backup", "", "Use pod volume file system backup by default for volumes")
-f.NoOptDefVal = strTrue
+f.NoOptDefVal = "true"
}

// BindWait binds the wait flag separately so it is not called by other create
@@ -73,7 +73,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
}

first := true
-for _, backup := range backups.Items {
+for i, backup := range backups.Items {
deleteRequestListOptions := pkgbackup.NewDeleteBackupRequestListOptions(backup.Name, string(backup.UID))
deleteRequestList, err := veleroClient.VeleroV1().DeleteBackupRequests(f.Namespace()).List(context.TODO(), deleteRequestListOptions)
if err != nil {
@@ -102,7 +102,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
}
}

-s := output.DescribeBackup(context.Background(), kbClient, &backup, deleteRequestList.Items, podVolumeBackupList.Items, vscList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile) //nolint
+s := output.DescribeBackup(context.Background(), kbClient, &backups.Items[i], deleteRequestList.Items, podVolumeBackupList.Items, vscList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile)
if first {
first = false
fmt.Print(s)
@@ -209,10 +209,10 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
if err := kbClient.List(context.Background(), locations, &kbclient.ListOptions{Namespace: f.Namespace()}); err != nil {
return errors.WithStack(err)
}
-for _, location := range locations.Items {
+for i, location := range locations.Items {
if location.Spec.Default {
location.Spec.Default = false
-if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil { //nolint
+if err := kbClient.Update(context.Background(), &locations.Items[i], &kbclient.UpdateOptions{}); err != nil {
return errors.WithStack(err)
}
break
@@ -115,8 +115,8 @@ func Run(f client.Factory, o *cli.DeleteOptions) error {
}

// Create a backup-location deletion request for each
-for _, location := range locations.Items {
-if err := kbClient.Delete(context.Background(), &location, &kbclient.DeleteOptions{}); err != nil { //nolint
+for i, location := range locations.Items {
+if err := kbClient.Delete(context.Background(), &locations.Items[i], &kbclient.DeleteOptions{}); err != nil {
errs = append(errs, errors.WithStack(err))
continue
}
@@ -162,8 +162,8 @@ func findAssociatedBackupRepos(client kbclient.Client, bslName, ns string) (vele

func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []error {
var errs []error
-for _, backup := range backups.Items {
-if err := client.Delete(context.Background(), &backup, &kbclient.DeleteOptions{}); err != nil { //nolint
+for i, backup := range backups.Items {
+if err := client.Delete(context.Background(), &backups.Items[i], &kbclient.DeleteOptions{}); err != nil {
errs = append(errs, errors.WithStack(fmt.Errorf("delete backup %q associated with deleted BSL: %w", backup.Name, err)))
continue
}
@@ -174,8 +174,8 @@ func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []err

func deleteBackupRepos(client kbclient.Client, repos velerov1api.BackupRepositoryList) []error {
var errs []error
-for _, repo := range repos.Items {
-if err := client.Delete(context.Background(), &repo, &kbclient.DeleteOptions{}); err != nil { //nolint
+for i, repo := range repos.Items {
+if err := client.Delete(context.Background(), &repos.Items[i], &kbclient.DeleteOptions{}); err != nil {
errs = append(errs, errors.WithStack(fmt.Errorf("delete backup repository %q associated with deleted BSL: %w", repo.Name, err)))
continue
}
@@ -120,7 +120,7 @@ func (o *SetOptions) Run(c *cobra.Command, f client.Factory) error {
if err := kbClient.List(context.Background(), locations, &kbclient.ListOptions{Namespace: f.Namespace()}); err != nil {
return errors.WithStack(err)
}
-for _, location := range locations.Items {
+for i, location := range locations.Items {
if !location.Spec.Default {
continue
}
@@ -129,7 +129,7 @@ func (o *SetOptions) Run(c *cobra.Command, f client.Factory) error {
break
}
location.Spec.Default = false
-if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil { //nolint
+if err := kbClient.Update(context.Background(), &locations.Items[i], &kbclient.UpdateOptions{}); err != nil {
return errors.WithStack(err)
}
break
@@ -294,7 +294,7 @@ func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) {
s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumebackups")
return
}
-for _, pvb := range pvbs.Items {
+for i, pvb := range pvbs.Items {
if pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseInProgress {
s.logger.Debugf("the status of podvolumebackup %q is %q, skip", pvb.GetName(), pvb.Status.Phase)
continue
@@ -307,7 +307,7 @@ func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) {
pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseFailed
pvb.Status.Message = fmt.Sprintf("get a podvolumebackup with status %q during the server starting, mark it as %q", velerov1api.PodVolumeBackupPhaseInProgress, pvb.Status.Phase)
pvb.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
-if err := client.Patch(s.ctx, &pvb, ctrlclient.MergeFrom(original)); err != nil { //nolint
+if err := client.Patch(s.ctx, &pvbs.Items[i], ctrlclient.MergeFrom(original)); err != nil {
s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumebackup %q", pvb.GetName())
continue
}
@@ -321,7 +321,7 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) {
s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumerestores")
return
}
-for _, pvr := range pvrs.Items {
+for i, pvr := range pvrs.Items {
if pvr.Status.Phase != velerov1api.PodVolumeRestorePhaseInProgress {
s.logger.Debugf("the status of podvolumerestore %q is %q, skip", pvr.GetName(), pvr.Status.Phase)
continue
@@ -345,7 +345,7 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) {
pvr.Status.Phase = velerov1api.PodVolumeRestorePhaseFailed
pvr.Status.Message = fmt.Sprintf("get a podvolumerestore with status %q during the server starting, mark it as %q", velerov1api.PodVolumeRestorePhaseInProgress, pvr.Status.Phase)
pvr.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
-if err := client.Patch(s.ctx, &pvr, ctrlclient.MergeFrom(original)); err != nil { //nolint
+if err := client.Patch(s.ctx, &pvrs.Items[i], ctrlclient.MergeFrom(original)); err != nil {
s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumerestore %q", pvr.GetName())
continue
}
@@ -107,10 +107,6 @@ func NewCreateOptions() *CreateOptions {
}
}

-const (
-strTrue = "true"
-)
-
func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
flags.StringVar(&o.BackupName, "from-backup", "", "Backup to restore from")
flags.StringVar(&o.ScheduleName, "from-schedule", "", "Schedule to restore from")
@@ -127,18 +123,18 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
f := flags.VarPF(&o.RestoreVolumes, "restore-volumes", "", "Whether to restore volumes from snapshots.")
// this allows the user to just specify "--restore-volumes" as shorthand for "--restore-volumes=true"
// like a normal bool flag
-f.NoOptDefVal = strTrue
+f.NoOptDefVal = "true"

f = flags.VarPF(&o.PreserveNodePorts, "preserve-nodeports", "", "Whether to preserve nodeports of Services when restoring.")
// this allows the user to just specify "--preserve-nodeports" as shorthand for "--preserve-nodeports=true"
// like a normal bool flag
-f.NoOptDefVal = strTrue
+f.NoOptDefVal = "true"

f = flags.VarPF(&o.IncludeClusterResources, "include-cluster-resources", "", "Include cluster-scoped resources in the restore.")
-f.NoOptDefVal = strTrue
+f.NoOptDefVal = "true"

f = flags.VarPF(&o.AllowPartiallyFailed, "allow-partially-failed", "", "If using --from-schedule, whether to consider PartiallyFailed backups when looking for the most recent one. This flag has no effect if not using --from-schedule.")
-f.NoOptDefVal = strTrue
+f.NoOptDefVal = "true"

flags.BoolVarP(&o.Wait, "wait", "w", o.Wait, "Wait for the operation to complete.")
}
@@ -68,14 +68,14 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
}

first := true
-for _, restore := range restores.Items {
+for i, restore := range restores.Items {
opts := newPodVolumeRestoreListOptions(restore.Name)
podvolumeRestoreList, err := veleroClient.VeleroV1().PodVolumeRestores(f.Namespace()).List(context.TODO(), opts)
if err != nil {
fmt.Fprintf(os.Stderr, "error getting PodVolumeRestores for restore %s: %v\n", restore.Name, err)
}

-s := output.DescribeRestore(context.Background(), kbClient, &restore, podvolumeRestoreList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile) //nolint
+s := output.DescribeRestore(context.Background(), kbClient, &restores.Items[i], podvolumeRestoreList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile)
if first {
first = false
fmt.Print(s)
@@ -53,8 +53,8 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
}

first := true
-for _, schedule := range schedules.Items {
-s := output.DescribeSchedule(&schedule) //nolint
+for i := range schedules.Items {
+s := output.DescribeSchedule(&schedules.Items[i])
if first {
first = false
fmt.Print(s)
@@ -939,7 +939,7 @@ func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client,
return
}

-for _, backup := range backups.Items {
+for i, backup := range backups.Items {
if backup.Status.Phase != velerov1api.BackupPhaseInProgress {
log.Debugf("the status of backup %q is %q, skip", backup.GetName(), backup.Status.Phase)
continue
@@ -948,7 +948,7 @@ func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client,
updated.Status.Phase = velerov1api.BackupPhaseFailed
updated.Status.FailureReason = fmt.Sprintf("get a backup with status %q during the server starting, mark it as %q", velerov1api.BackupPhaseInProgress, updated.Status.Phase)
updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
-if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backup)); err != nil { //nolint
+if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backups.Items[i])); err != nil {
log.WithError(errors.WithStack(err)).Errorf("failed to patch backup %q", backup.GetName())
continue
}
@@ -962,7 +962,7 @@ func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client,
log.WithError(errors.WithStack(err)).Error("failed to list restores")
return
}
-for _, restore := range restores.Items {
+for i, restore := range restores.Items {
if restore.Status.Phase != velerov1api.RestorePhaseInProgress {
log.Debugf("the status of restore %q is %q, skip", restore.GetName(), restore.Status.Phase)
continue
@@ -971,7 +971,7 @@ func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client,
updated.Status.Phase = velerov1api.RestorePhaseFailed
updated.Status.FailureReason = fmt.Sprintf("get a restore with status %q during the server starting, mark it as %q", velerov1api.RestorePhaseInProgress, updated.Status.Phase)
updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
-if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restore)); err != nil { //nolint
+if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restores.Items[i])); err != nil {
log.WithError(errors.WithStack(err)).Errorf("failed to patch restore %q", restore.GetName())
continue
}
@@ -325,11 +325,11 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}); err != nil {
log.WithError(errors.WithStack(err)).Error("Error listing restore API objects")
} else {
-for _, restore := range restoreList.Items {
+for i, restore := range restoreList.Items {
if restore.Spec.BackupName != backup.Name {
continue
}
-restoreLog := log.WithField("restore", kube.NamespaceAndName(&restore)) //nolint
+restoreLog := log.WithField("restore", kube.NamespaceAndName(&restoreList.Items[i]))

restoreLog.Info("Deleting restore log/results from backup storage")
if err := backupStore.DeleteRestore(restore.Name); err != nil {
@@ -339,8 +339,8 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
}

restoreLog.Info("Deleting restore referencing backup")
-if err := r.Delete(ctx, &restore); err != nil { //nolint
-errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(&restore)).Error()) //nolint
+if err := r.Delete(ctx, &restoreList.Items[i]); err != nil {
+errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(&restoreList.Items[i])).Error())
}
}
}
@@ -423,11 +423,11 @@ func (r *backupDeletionReconciler) deleteExistingDeletionRequests(ctx context.Co
return []error{errors.Wrap(err, "error listing existing DeleteBackupRequests for backup")}
}
var errs []error
-for _, dbr := range dbrList.Items {
+for i, dbr := range dbrList.Items {
if dbr.Name == req.Name {
continue
}
-if err := r.Delete(ctx, &dbr); err != nil { //nolint
+if err := r.Delete(ctx, &dbrList.Items[i]); err != nil {
errs = append(errs, errors.WithStack(err))
} else {
log.Infof("deletion request '%s' removed.", dbr.Name)
@@ -317,13 +317,13 @@ func (b *backupSyncReconciler) deleteOrphanedBackups(ctx context.Context, locati
return
}

-for _, backup := range backupList.Items {
+for i, backup := range backupList.Items {
log = log.WithField("backup", backup.Name)
if backup.Status.Phase != velerov1api.BackupPhaseCompleted || backupStoreBackups.Has(backup.Name) {
continue
}

-if err := b.client.Delete(ctx, &backup, &client.DeleteOptions{}); err != nil { //nolint
+if err := b.client.Delete(ctx, &backupList.Items[i], &client.DeleteOptions{}); err != nil {
log.WithError(errors.WithStack(err)).Error("Error deleting orphaned backup from cluster")
} else {
log.Debug("Deleted orphaned backup from cluster")
@@ -344,10 +344,10 @@ func (b *backupSyncReconciler) deleteCSISnapshotsByBackup(ctx context.Context, b
if err := b.client.List(ctx, &vsList, listOptions); err != nil {
log.WithError(err).Warnf("Failed to list volumesnapshots for backup: %s, the deletion will be skipped", backupName)
} else {
-for _, vs := range vsList.Items {
+for i, vs := range vsList.Items {
name := kube.NamespaceAndName(vs.GetObjectMeta())
log.Debugf("Deleting volumesnapshot %s", name)
-if err := b.client.Delete(context.TODO(), &vs); err != nil { //nolint
+if err := b.client.Delete(context.TODO(), &vsList.Items[i]); err != nil {
log.WithError(err).Warnf("Failed to delete volumesnapshot %s", name)
}
}
@@ -379,11 +379,13 @@ func backupSyncSourceOrderFunc(objList client.ObjectList) client.ObjectList {
bslArray = append(bslArray, &inputBSLList.Items[i])
// append everything before the default
for _, bsl := range inputBSLList.Items[:i] {
-bslArray = append(bslArray, &bsl) //nolint
+cpBsl := bsl
+bslArray = append(bslArray, &cpBsl)
}
// append everything after the default
for _, bsl := range inputBSLList.Items[i+1:] {
-bslArray = append(bslArray, &bsl) //nolint
+cpBsl := bsl
+bslArray = append(bslArray, &cpBsl)
}
meta.SetList(resultBSLList, bslArray)
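The hunk above takes a slightly different route from the rest of the commit: instead of indexing the slice, it copies the loop variable into a fresh local (cpBsl) and appends the address of that copy. Either form satisfies the linter; the difference is that the pointer now refers to an independent copy rather than to the element inside inputBSLList.Items. A small sketch of that variant, again with a made-up type:

// Sketch of the copy-per-iteration variant (BSL is a made-up type).
package main

import "fmt"

type BSL struct{ Name string }

func main() {
	items := []BSL{{Name: "default"}, {Name: "secondary"}}

	var ptrs []*BSL
	for _, bsl := range items {
		cpBsl := bsl                // fresh variable on every iteration
		ptrs = append(ptrs, &cpBsl) // pointer to the copy, not to items[i]
	}

	ptrs[1].Name = "renamed"
	// Mutations through the copy do not touch the original slice.
	fmt.Println(items[1].Name) // still "secondary"
}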
@@ -61,12 +61,12 @@ func IsRunningInNode(ctx context.Context, namespace string, nodeName string, pod
return errors.Wrap(err, "failed to list daemonset pods")
}

-for _, pod := range pods.Items {
-if kube.IsPodRunning(&pod) != nil { //nolint
+for i := range pods.Items {
+if kube.IsPodRunning(&pods.Items[i]) != nil {
continue
}

-if pod.Spec.NodeName == nodeName {
+if pods.Items[i].Spec.NodeName == nodeName {
return nil
}
}
@@ -506,7 +506,7 @@ func TestGetRepoPassword(t *testing.T) {
},
}

-password, err := getRepoPassword(urp.credentialGetter.FromSecret, RepoParam{})
+password, err := getRepoPassword(urp.credentialGetter.FromSecret)

require.Equal(t, tc.expected, password)
@@ -70,15 +70,15 @@ func (a *AdmissionWebhookConfigurationAction) Execute(input *velero.RestoreItemA
return velero.NewRestoreItemActionExecuteOutput(input.Item), nil
}
newWebhooks := make([]interface{}, 0)
-for i, entry := range webhooks {
+for i := range webhooks {
logger2 := logger.WithField("index", i)
-obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&entry) //nolint
+obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&webhooks[i])
if err != nil {
logger2.Errorf("failed to convert the webhook entry, error: %v, it will be dropped", err)
continue
}
s, _, _ := unstructured.NestedString(obj, "sideEffects")
-if s != "None" && s != "NoneOnDryRun" { //nolint
+if s != "None" && s != "NoneOnDryRun" {
logger2.Infof("reset the invalid sideEffects value '%s' to 'None'", s)
obj["sideEffects"] = "None"
}
@@ -123,9 +123,9 @@ func EnsureNamespaceExistsAndIsReady(namespace *corev1api.Namespace, client core
func GetVolumeDirectory(ctx context.Context, log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, cli client.Client) (string, error) {
var volume *corev1api.Volume

-for _, item := range pod.Spec.Volumes {
-if item.Name == volumeName {
-volume = &item //nolint
+for i := range pod.Spec.Volumes {
+if pod.Spec.Volumes[i].Name == volumeName {
+volume = &pod.Spec.Volumes[i]
break
}
}