Mirror of https://github.com/vmware-tanzu/velero.git, synced 2026-01-07 05:46:37 +00:00
Merge pull request #5459 from Lyndon-Li/velero-docker-image-go-1.18
Upgrade velero docker image to use go 1.18
changelogs/unreleased/5459-lyndon (new file, 1 line)
@@ -0,0 +1 @@
+Upgrade velero docker image to use go 1.18 and upgrade golangci-lint to 1.45.0

@@ -25,8 +25,8 @@ run:
   # from this option's value (see skip-dirs-use-default).
   # "/" will be replaced by current OS file path separator to properly work
   # on Windows.
-  #skip-dirs:
-  #  - src/external_libs
+  skip-dirs:
+    - test/e2e/*
   #  - autogenerated_by_my_lib

   # default is true. Enables skipping of directories:

@@ -39,8 +39,8 @@ run:
   # autogenerated files. If it's not please let us know.
   # "/" will be replaced by current OS file path separator to properly work
   # on Windows.
-  # skip-files:
-  #   - ".*\\.my\\.go$"
+  skip-files:
+    - ".*_test.go$"
   #   - lib/bad.go

   # by default isn't set. If set we pass it to "go list -mod={option}". From "go help modules":

@@ -117,7 +117,7 @@ linters-settings:
     # minimal length of string constant, 3 by default
     min-len: 3
     # minimal occurrences count to trigger, 3 by default
-    min-occurrences: 3
+    min-occurrences: 5
   gocritic:
     # Which checks should be enabled; can't be combined with 'disabled-checks';
     # See https://go-critic.github.io/overview#checks-overview

@@ -320,7 +320,7 @@ linters:
   fast: false


-#issues:
+issues:
 # # List of regexps of issue texts to exclude, empty list by default.
 # # But independently from this option we use default exclude patterns,
 # # it can be disabled by `exclude-use-default: false`. To list all

@@ -359,7 +359,7 @@ linters:
   # it can be disabled by this option. To list all
   # excluded by default patterns execute `golangci-lint run --help`.
   # Default value for this option is true.
-  exclude-use-default: false
+  exclude-use-default: true

   # The default value is false. If set to true exclude and exclude-rules
   # regular expressions become case sensitive.

@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-FROM golang:1.17
+FROM golang:1.18

 ARG GOPROXY

@@ -36,11 +36,11 @@ RUN wget --quiet https://github.com/kubernetes-sigs/kubebuilder/releases/downloa
     chmod +x /usr/local/kubebuilder/bin/kubebuilder

 # get controller-tools
-RUN go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0
+RUN go install sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0

 # get goimports (the revision is pinned so we don't indiscriminately update, but the particular commit
 # is not important)
-RUN go get golang.org/x/tools/cmd/goimports@11e9d9cc0042e6bd10337d4d2c3e5d9295508e7d
+RUN go install golang.org/x/tools/cmd/goimports@11e9d9cc0042e6bd10337d4d2c3e5d9295508e7d

 # get protoc compiler and golang plugin
 WORKDIR /root

@@ -49,7 +49,7 @@ RUN wget --quiet https://github.com/protocolbuffers/protobuf/releases/download/v
     unzip protoc-3.14.0-linux-x86_64.zip && \
     mv bin/protoc /usr/bin/protoc && \
     chmod +x /usr/bin/protoc
-RUN go get github.com/golang/protobuf/protoc-gen-go@v1.4.3
+RUN go install github.com/golang/protobuf/protoc-gen-go@v1.4.3

 # get goreleaser
 RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v0.120.8/goreleaser_Linux_x86_64.tar.gz && \

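Note: as of Go 1.18, `go get` no longer builds or installs executables, which is why every tool bootstrap in this Dockerfile moves to the module-aware `go install pkg@version` form.
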
@@ -58,7 +58,7 @@ RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v0.1
     chmod +x /usr/bin/goreleaser

 # get golangci-lint
-RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.27.0
+RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.45.0

 # install kubectl
 RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl

@@ -84,7 +84,7 @@ func (e *Extractor) readBackup(tarRdr *tar.Reader) (string, error) {
            return "", err
        }

-       target := filepath.Join(dir, header.Name)
+       target := filepath.Join(dir, header.Name) //nolint:gosec

        switch header.Typeflag {
        case tar.TypeDir:

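The `//nolint:gosec` here suppresses G305 ("file traversal when extracting zip/tar archive"). For reference, a minimal sketch of the validation G305 normally asks for — `safeJoin` and its error text are illustrative, not Velero helpers:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// safeJoin joins an archive entry name onto dir and rejects entries such as
// "../../etc/passwd" that would escape the extraction directory.
func safeJoin(dir, name string) (string, error) {
	target := filepath.Join(dir, name) // Join cleans the resulting path
	if target != filepath.Clean(dir) &&
		!strings.HasPrefix(target, filepath.Clean(dir)+string(os.PathSeparator)) {
		return "", fmt.Errorf("archive entry %q escapes %q", name, dir)
	}
	return target, nil
}

func main() {
	fmt.Println(safeJoin("/tmp/extract", "backup.json"))   // /tmp/extract/backup.json <nil>
	fmt.Println(safeJoin("/tmp/extract", "../etc/passwd")) // error
}
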
@@ -151,7 +151,7 @@ func sortResourcesByOrder(log logrus.FieldLogger, items []*kubernetesResource, o
 }

 // getOrderedResourcesForType gets order of resourceType from orderResources.
-func getOrderedResourcesForType(log logrus.FieldLogger, orderedResources map[string]string, resourceType string) []string {
+func getOrderedResourcesForType(orderedResources map[string]string, resourceType string) []string {
    if orderedResources == nil {
        return nil
    }

@@ -175,7 +175,7 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
        clusterScoped = !resource.Namespaced
    )

-   orders := getOrderedResourcesForType(log, r.backupRequest.Backup.Spec.OrderedResources, resource.Name)
+   orders := getOrderedResourcesForType(r.backupRequest.Backup.Spec.OrderedResources, resource.Name)
    // Getting the preferred group version of this resource
    preferredGVR, _, err := r.discoveryHelper.ResourceFor(gr.WithVersion(""))
    if err != nil {

@@ -73,7 +73,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
        }

        first := true
-       for _, backup := range backups.Items {
+       for i, backup := range backups.Items {
            deleteRequestListOptions := pkgbackup.NewDeleteBackupRequestListOptions(backup.Name, string(backup.UID))
            deleteRequestList, err := veleroClient.VeleroV1().DeleteBackupRequests(f.Namespace()).List(context.TODO(), deleteRequestListOptions)
            if err != nil {

@@ -102,7 +102,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
            }
        }

-       s := output.DescribeBackup(context.Background(), kbClient, &backup, deleteRequestList.Items, podVolumeBackupList.Items, vscList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile)
+       s := output.DescribeBackup(context.Background(), kbClient, &backups.Items[i], deleteRequestList.Items, podVolumeBackupList.Items, vscList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile)
        if first {
            first = false
            fmt.Print(s)

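This `for i, ... := range` plus `&list.Items[i]` rewrite recurs through the rest of the commit; golangci-lint (gosec G601 "implicit memory aliasing in for loop", exportloopref) flags `&backup` because the range variable is a single variable reused on every iteration under the loop semantics Go 1.18 has. A self-contained sketch of the failure mode and the fix, with hypothetical types:

package main

import "fmt"

type Backup struct{ Name string }

func main() {
	items := []Backup{{"a"}, {"b"}, {"c"}}

	// Before: every appended pointer aliases the one loop variable, so all
	// three entries end up pointing at the final element (prints "c c c"
	// under pre-1.22 loop semantics).
	var bad []*Backup
	for _, b := range items {
		bad = append(bad, &b)
	}
	fmt.Println(bad[0].Name, bad[1].Name, bad[2].Name)

	// After: index into the backing slice, as the commit does with
	// &backups.Items[i]; each pointer refers to a distinct element.
	var good []*Backup
	for i := range items {
		good = append(good, &items[i])
	}
	fmt.Println(good[0].Name, good[1].Name, good[2].Name) // a b c
}
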
@@ -209,10 +209,10 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
    if err := kbClient.List(context.Background(), locations, &kbclient.ListOptions{Namespace: f.Namespace()}); err != nil {
        return errors.WithStack(err)
    }
-   for _, location := range locations.Items {
+   for i, location := range locations.Items {
        if location.Spec.Default {
            location.Spec.Default = false
-           if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil {
+           if err := kbClient.Update(context.Background(), &locations.Items[i], &kbclient.UpdateOptions{}); err != nil {
                return errors.WithStack(err)
            }
            break

@@ -115,8 +115,8 @@ func Run(f client.Factory, o *cli.DeleteOptions) error {
    }

    // Create a backup-location deletion request for each
-   for _, location := range locations.Items {
-       if err := kbClient.Delete(context.Background(), &location, &kbclient.DeleteOptions{}); err != nil {
+   for i, location := range locations.Items {
+       if err := kbClient.Delete(context.Background(), &locations.Items[i], &kbclient.DeleteOptions{}); err != nil {
            errs = append(errs, errors.WithStack(err))
            continue
        }

@@ -162,8 +162,8 @@ func findAssociatedBackupRepos(client kbclient.Client, bslName, ns string) (vele

 func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []error {
    var errs []error
-   for _, backup := range backups.Items {
-       if err := client.Delete(context.Background(), &backup, &kbclient.DeleteOptions{}); err != nil {
+   for i, backup := range backups.Items {
+       if err := client.Delete(context.Background(), &backups.Items[i], &kbclient.DeleteOptions{}); err != nil {
            errs = append(errs, errors.WithStack(fmt.Errorf("delete backup %q associated with deleted BSL: %w", backup.Name, err)))
            continue
        }

@@ -174,8 +174,8 @@ func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []err

 func deleteBackupRepos(client kbclient.Client, repos velerov1api.BackupRepositoryList) []error {
    var errs []error
-   for _, repo := range repos.Items {
-       if err := client.Delete(context.Background(), &repo, &kbclient.DeleteOptions{}); err != nil {
+   for i, repo := range repos.Items {
+       if err := client.Delete(context.Background(), &repos.Items[i], &kbclient.DeleteOptions{}); err != nil {
            errs = append(errs, errors.WithStack(fmt.Errorf("delete backup repository %q associated with deleted BSL: %w", repo.Name, err)))
            continue
        }

@@ -120,7 +120,7 @@ func (o *SetOptions) Run(c *cobra.Command, f client.Factory) error {
    if err := kbClient.List(context.Background(), locations, &kbclient.ListOptions{Namespace: f.Namespace()}); err != nil {
        return errors.WithStack(err)
    }
-   for _, location := range locations.Items {
+   for i, location := range locations.Items {
        if !location.Spec.Default {
            continue
        }

@@ -129,7 +129,7 @@ func (o *SetOptions) Run(c *cobra.Command, f client.Factory) error {
            break
        }
        location.Spec.Default = false
-       if err := kbClient.Update(context.Background(), &location, &kbclient.UpdateOptions{}); err != nil {
+       if err := kbClient.Update(context.Background(), &locations.Items[i], &kbclient.UpdateOptions{}); err != nil {
            return errors.WithStack(err)
        }
        break

@@ -294,7 +294,7 @@ func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) {
        s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumebackups")
        return
    }
-   for _, pvb := range pvbs.Items {
+   for i, pvb := range pvbs.Items {
        if pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseInProgress {
            s.logger.Debugf("the status of podvolumebackup %q is %q, skip", pvb.GetName(), pvb.Status.Phase)
            continue

@@ -307,7 +307,7 @@ func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) {
        pvb.Status.Phase = velerov1api.PodVolumeBackupPhaseFailed
        pvb.Status.Message = fmt.Sprintf("get a podvolumebackup with status %q during the server starting, mark it as %q", velerov1api.PodVolumeBackupPhaseInProgress, pvb.Status.Phase)
        pvb.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
-       if err := client.Patch(s.ctx, &pvb, ctrlclient.MergeFrom(original)); err != nil {
+       if err := client.Patch(s.ctx, &pvbs.Items[i], ctrlclient.MergeFrom(original)); err != nil {
            s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumebackup %q", pvb.GetName())
            continue
        }

@@ -321,7 +321,7 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) {
        s.logger.WithError(errors.WithStack(err)).Error("failed to list podvolumerestores")
        return
    }
-   for _, pvr := range pvrs.Items {
+   for i, pvr := range pvrs.Items {
        if pvr.Status.Phase != velerov1api.PodVolumeRestorePhaseInProgress {
            s.logger.Debugf("the status of podvolumerestore %q is %q, skip", pvr.GetName(), pvr.Status.Phase)
            continue

@@ -345,7 +345,7 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) {
        pvr.Status.Phase = velerov1api.PodVolumeRestorePhaseFailed
        pvr.Status.Message = fmt.Sprintf("get a podvolumerestore with status %q during the server starting, mark it as %q", velerov1api.PodVolumeRestorePhaseInProgress, pvr.Status.Phase)
        pvr.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
-       if err := client.Patch(s.ctx, &pvr, ctrlclient.MergeFrom(original)); err != nil {
+       if err := client.Patch(s.ctx, &pvrs.Items[i], ctrlclient.MergeFrom(original)); err != nil {
            s.logger.WithError(errors.WithStack(err)).Errorf("failed to patch podvolumerestore %q", pvr.GetName())
            continue
        }

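The PVB and PVR hunks above combine the indexing fix with controller-runtime's merge-patch API. A compile-only sketch of that pattern (the mutation is applied directly to the indexed element here for clarity; setup and error handling elided):

package sketch

import (
	"context"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)

func failInProgress(ctx context.Context, c ctrlclient.Client, pvbs *velerov1api.PodVolumeBackupList) {
	for i := range pvbs.Items {
		// Snapshot the element before mutating it, then patch the element in
		// the list's backing array; MergeFrom diffs mutated vs. original to
		// produce a minimal merge patch for the right object.
		original := pvbs.Items[i].DeepCopy()
		pvbs.Items[i].Status.Phase = velerov1api.PodVolumeBackupPhaseFailed
		_ = c.Patch(ctx, &pvbs.Items[i], ctrlclient.MergeFrom(original))
	}
}
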
@@ -68,14 +68,14 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
        }

        first := true
-       for _, restore := range restores.Items {
+       for i, restore := range restores.Items {
            opts := newPodVolumeRestoreListOptions(restore.Name)
            podvolumeRestoreList, err := veleroClient.VeleroV1().PodVolumeRestores(f.Namespace()).List(context.TODO(), opts)
            if err != nil {
                fmt.Fprintf(os.Stderr, "error getting PodVolumeRestores for restore %s: %v\n", restore.Name, err)
            }

-           s := output.DescribeRestore(context.Background(), kbClient, &restore, podvolumeRestoreList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile)
+           s := output.DescribeRestore(context.Background(), kbClient, &restores.Items[i], podvolumeRestoreList.Items, details, veleroClient, insecureSkipTLSVerify, caCertFile)
            if first {
                first = false
                fmt.Print(s)

@@ -53,8 +53,8 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
        }

        first := true
-       for _, schedule := range schedules.Items {
-           s := output.DescribeSchedule(&schedule)
+       for i := range schedules.Items {
+           s := output.DescribeSchedule(&schedules.Items[i])
            if first {
                first = false
                fmt.Print(s)

@@ -969,7 +969,7 @@ func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client,
        return
    }

-   for _, backup := range backups.Items {
+   for i, backup := range backups.Items {
        if backup.Status.Phase != velerov1api.BackupPhaseInProgress {
            log.Debugf("the status of backup %q is %q, skip", backup.GetName(), backup.Status.Phase)
            continue

@@ -978,7 +978,7 @@ func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client,
        updated.Status.Phase = velerov1api.BackupPhaseFailed
        updated.Status.FailureReason = fmt.Sprintf("get a backup with status %q during the server starting, mark it as %q", velerov1api.BackupPhaseInProgress, updated.Status.Phase)
        updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
-       if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backup)); err != nil {
+       if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backups.Items[i])); err != nil {
            log.WithError(errors.WithStack(err)).Errorf("failed to patch backup %q", backup.GetName())
            continue
        }

@@ -992,7 +992,7 @@ func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client,
        log.WithError(errors.WithStack(err)).Error("failed to list restores")
        return
    }
-   for _, restore := range restores.Items {
+   for i, restore := range restores.Items {
        if restore.Status.Phase != velerov1api.RestorePhaseInProgress {
            log.Debugf("the status of restore %q is %q, skip", restore.GetName(), restore.Status.Phase)
            continue

@@ -1001,7 +1001,7 @@ func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client,
        updated.Status.Phase = velerov1api.RestorePhaseFailed
        updated.Status.FailureReason = fmt.Sprintf("get a restore with status %q during the server starting, mark it as %q", velerov1api.RestorePhaseInProgress, updated.Status.Phase)
        updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()}
-       if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restore)); err != nil {
+       if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restores.Items[i])); err != nil {
            log.WithError(errors.WithStack(err)).Errorf("failed to patch restore %q", restore.GetName())
            continue
        }

@@ -111,7 +111,7 @@ func Stream(ctx context.Context, kbClient kbclient.Client, namespace, name strin
    httpClient := new(http.Client)
    httpClient.Transport = &http.Transport{
        TLSClientConfig: &tls.Config{
-           InsecureSkipVerify: insecureSkipTLSVerify,
+           InsecureSkipVerify: insecureSkipTLSVerify, //nolint:gosec
            RootCAs:            caPool,
        },
        IdleConnTimeout: timeout,

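The annotation here presumably quiets gosec G402 (bad TLS connection settings, i.e. InsecureSkipVerify): skipping verification is an explicit, user-requested option on this code path, and the custom CA pool is still honored, so the finding is suppressed rather than "fixed".
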
@@ -125,7 +125,7 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) {
    }
    d.Printf("\tIncluded:\t%s\n", s)
    if len(spec.ExcludedNamespaces) == 0 {
-       s = "<none>"
+       s = emptyDisplay
    } else {
        s = strings.Join(spec.ExcludedNamespaces, ", ")
    }

@@ -140,7 +140,7 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) {
    }
    d.Printf("\tIncluded:\t%s\n", s)
    if len(spec.ExcludedResources) == 0 {
-       s = "<none>"
+       s = emptyDisplay
    } else {
        s = strings.Join(spec.ExcludedResources, ", ")
    }

@@ -149,7 +149,7 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) {
    d.Printf("\tCluster-scoped:\t%s\n", BoolPointerString(spec.IncludeClusterResources, "excluded", "included", "auto"))

    d.Println()
-   s = "<none>"
+   s = emptyDisplay
    if spec.LabelSelector != nil {
        s = metav1.FormatLabelSelector(spec.LabelSelector)
    }

@@ -169,7 +169,7 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) {

    d.Println()
    if len(spec.Hooks.Resources) == 0 {
-       d.Printf("Hooks:\t<none>\n")
+       d.Printf("Hooks:\t" + emptyDisplay + "\n")
    } else {
        d.Printf("Hooks:\n")
        d.Printf("\tResources:\n")

@@ -184,7 +184,7 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) {
            }
            d.Printf("\t\t\t\tIncluded:\t%s\n", s)
            if len(spec.ExcludedNamespaces) == 0 {
-               s = "<none>"
+               s = emptyDisplay
            } else {
                s = strings.Join(spec.ExcludedNamespaces, ", ")
            }

@@ -199,14 +199,14 @@ func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) {
            }
            d.Printf("\t\t\t\tIncluded:\t%s\n", s)
            if len(spec.ExcludedResources) == 0 {
-               s = "<none>"
+               s = emptyDisplay
            } else {
                s = strings.Join(spec.ExcludedResources, ", ")
            }
            d.Printf("\t\t\t\tExcluded:\t%s\n", s)

            d.Println()
-           s = "<none>"
+           s = emptyDisplay
            if backupResourceHookSpec.LabelSelector != nil {
                s = metav1.FormatLabelSelector(backupResourceHookSpec.LabelSelector)
            }

@@ -34,7 +34,10 @@ import (
    "github.com/vmware-tanzu/velero/pkg/util/encode"
 )

-const downloadRequestTimeout = 30 * time.Second
+const (
+   downloadRequestTimeout = 30 * time.Second
+   emptyDisplay           = "<none>"
+)

 // BindFlags defines a set of output-specific flags within the provided
 // FlagSet.

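This const block pairs with the `s = emptyDisplay` substitutions above and below: the `"<none>"` literal repeated often enough to be exactly the kind of duplication goconst reports (the config change earlier raises its min-occurrences threshold to 5), so it is hoisted into a shared constant.
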
@@ -107,7 +107,7 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel
    }
    d.Printf("\tIncluded:\t%s\n", s)
    if len(restore.Spec.ExcludedNamespaces) == 0 {
-       s = "<none>"
+       s = emptyDisplay
    } else {
        s = strings.Join(restore.Spec.ExcludedNamespaces, ", ")
    }

@@ -122,7 +122,7 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel
    }
    d.Printf("\tIncluded:\t%s\n", s)
    if len(restore.Spec.ExcludedResources) == 0 {
-       s = "<none>"
+       s = emptyDisplay
    } else {
        s = strings.Join(restore.Spec.ExcludedResources, ", ")
    }

@@ -134,7 +134,7 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel
    d.DescribeMap("Namespace mappings", restore.Spec.NamespaceMapping)

    d.Println()
-   s = "<none>"
+   s = emptyDisplay
    if restore.Spec.LabelSelector != nil {
        s = metav1.FormatLabelSelector(restore.Spec.LabelSelector)
    }

@@ -149,7 +149,7 @@ func DescribeRestore(ctx context.Context, kbClient kbclient.Client, restore *vel
    }

    d.Println()
-   s = "<none>"
+   s = emptyDisplay
    if restore.Spec.ExistingResourcePolicy != "" {
        s = string(restore.Spec.ExistingResourcePolicy)
    }

@@ -708,9 +708,8 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error {

        // Delete the VolumeSnapshots created in the backup, when CSI feature is enabled.
        if len(volumeSnapshots) > 0 && len(volumeSnapshotContents) > 0 {
-           c.deleteVolumeSnapshot(volumeSnapshots, volumeSnapshotContents, *backup, backupLog)
+           c.deleteVolumeSnapshot(volumeSnapshots, volumeSnapshotContents, backupLog)
        }
-
    }

    // Mark completion timestamp before serializing and uploading.

@@ -761,7 +760,7 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error {
        return err
    }

-   if errs := persistBackup(backup, backupFile, logFile, backupStore, c.logger.WithField(Backup, kubeutil.NamespaceAndName(backup)), volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses); len(errs) > 0 {
+   if errs := persistBackup(backup, backupFile, logFile, backupStore, volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses); len(errs) > 0 {
        fatalErrs = append(fatalErrs, errs...)
    }

@@ -805,7 +804,6 @@ func recordBackupMetrics(log logrus.FieldLogger, backup *velerov1api.Backup, bac
 func persistBackup(backup *pkgbackup.Request,
    backupContents, backupLog *os.File,
    backupStore persistence.BackupStore,
-   log logrus.FieldLogger,
    csiVolumeSnapshots []snapshotv1api.VolumeSnapshot,
    csiVolumeSnapshotContents []snapshotv1api.VolumeSnapshotContent,
    csiVolumesnapshotClasses []snapshotv1api.VolumeSnapshotClass,

@@ -954,7 +952,7 @@ func (c *backupController) checkVolumeSnapshotReadyToUse(ctx context.Context, vo
 // change DeletionPolicy to Retain before deleting VS, then change DeletionPolicy back to Delete.
 func (c *backupController) deleteVolumeSnapshot(volumeSnapshots []snapshotv1api.VolumeSnapshot,
    volumeSnapshotContents []snapshotv1api.VolumeSnapshotContent,
-   backup pkgbackup.Request, logger logrus.FieldLogger) {
+   logger logrus.FieldLogger) {
    var wg sync.WaitGroup
    vscMap := make(map[string]snapshotv1api.VolumeSnapshotContent)
    for _, vsc := range volumeSnapshotContents {

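These signature trims — `getOrderedResourcesForType` earlier, `persistBackup` and `deleteVolumeSnapshot` here, `putResults`, `newRestoreLogger`, `getRepoPassword` and the Azure config helpers below — all drop parameters or return values that no caller used, the kind of finding unparam reports; where a currently-unused parameter is kept deliberately, the commit annotates it with `//nolint:unparam` instead (see `remapClaimRefNS` near the end).
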
@@ -325,11 +325,11 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
    }); err != nil {
        log.WithError(errors.WithStack(err)).Error("Error listing restore API objects")
    } else {
-       for _, restore := range restoreList.Items {
+       for i, restore := range restoreList.Items {
            if restore.Spec.BackupName != backup.Name {
                continue
            }
-           restoreLog := log.WithField("restore", kube.NamespaceAndName(&restore))
+           restoreLog := log.WithField("restore", kube.NamespaceAndName(&restoreList.Items[i]))

            restoreLog.Info("Deleting restore log/results from backup storage")
            if err := backupStore.DeleteRestore(restore.Name); err != nil {

@@ -339,8 +339,8 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque
            }

            restoreLog.Info("Deleting restore referencing backup")
-           if err := r.Delete(ctx, &restore); err != nil {
-               errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(&restore)).Error())
+           if err := r.Delete(ctx, &restoreList.Items[i]); err != nil {
+               errs = append(errs, errors.Wrapf(err, "error deleting restore %s", kube.NamespaceAndName(&restoreList.Items[i])).Error())
            }
        }
    }

@@ -423,11 +423,11 @@ func (r *backupDeletionReconciler) deleteExistingDeletionRequests(ctx context.Co
        return []error{errors.Wrap(err, "error listing existing DeleteBackupRequests for backup")}
    }
    var errs []error
-   for _, dbr := range dbrList.Items {
+   for i, dbr := range dbrList.Items {
        if dbr.Name == req.Name {
            continue
        }
-       if err := r.Delete(ctx, &dbr); err != nil {
+       if err := r.Delete(ctx, &dbrList.Items[i]); err != nil {
            errs = append(errs, errors.WithStack(err))
        } else {
            log.Infof("deletion request '%s' removed.", dbr.Name)

@@ -317,13 +317,13 @@ func (b *backupSyncReconciler) deleteOrphanedBackups(ctx context.Context, locati
        return
    }

-   for _, backup := range backupList.Items {
+   for i, backup := range backupList.Items {
        log = log.WithField("backup", backup.Name)
        if backup.Status.Phase != velerov1api.BackupPhaseCompleted || backupStoreBackups.Has(backup.Name) {
            continue
        }

-       if err := b.client.Delete(ctx, &backup, &client.DeleteOptions{}); err != nil {
+       if err := b.client.Delete(ctx, &backupList.Items[i], &client.DeleteOptions{}); err != nil {
            log.WithError(errors.WithStack(err)).Error("Error deleting orphaned backup from cluster")
        } else {
            log.Debug("Deleted orphaned backup from cluster")

@@ -344,10 +344,10 @@ func (b *backupSyncReconciler) deleteCSISnapshotsByBackup(ctx context.Context, b
    if err := b.client.List(ctx, &vsList, listOptions); err != nil {
        log.WithError(err).Warnf("Failed to list volumesnapshots for backup: %s, the deletion will be skipped", backupName)
    } else {
-       for _, vs := range vsList.Items {
+       for i, vs := range vsList.Items {
            name := kube.NamespaceAndName(vs.GetObjectMeta())
            log.Debugf("Deleting volumesnapshot %s", name)
-           if err := b.client.Delete(context.TODO(), &vs); err != nil {
+           if err := b.client.Delete(context.TODO(), &vsList.Items[i]); err != nil {
                log.WithError(err).Warnf("Failed to delete volumesnapshot %s", name)
            }
        }

@@ -379,11 +379,13 @@ func backupSyncSourceOrderFunc(objList client.ObjectList) client.ObjectList {
    bslArray = append(bslArray, &inputBSLList.Items[i])
    // append everything before the default
    for _, bsl := range inputBSLList.Items[:i] {
-       bslArray = append(bslArray, &bsl)
+       cpBsl := bsl
+       bslArray = append(bslArray, &cpBsl)
    }
    // append everything after the default
    for _, bsl := range inputBSLList.Items[i+1:] {
-       bslArray = append(bslArray, &bsl)
+       cpBsl := bsl
+       bslArray = append(bslArray, &cpBsl)
    }
    meta.SetList(resultBSLList, bslArray)

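Here the aliasing fix takes its other common form: instead of indexing into the backing slice, the range variable is copied into a fresh per-iteration local (`cpBsl`) whose address is then safe to retain. A minimal runnable sketch:

package main

import "fmt"

func main() {
	items := []string{"x", "y", "z"}
	var ptrs []*string
	for _, v := range items {
		cp := v // fresh variable each iteration; taking its address is safe
		ptrs = append(ptrs, &cp)
	}
	fmt.Println(*ptrs[0], *ptrs[1], *ptrs[2]) // x y z
}
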
@@ -302,7 +302,7 @@ func (c *PodVolumeRestoreReconciler) processRestore(ctx context.Context, req *ve
    // Write a done file with name=<restore-uid> into the just-created .velero dir
    // within the volume. The velero init container on the pod is waiting
    // for this file to exist in each restored volume before completing.
-   if err := ioutil.WriteFile(filepath.Join(volumePath, ".velero", string(restoreUID)), nil, 0644); err != nil {
+   if err := ioutil.WriteFile(filepath.Join(volumePath, ".velero", string(restoreUID)), nil, 0644); err != nil { //nolint:gosec
        return errors.Wrap(err, "error writing done file")
    }

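The suppression here most likely covers gosec G306 (permissions looser than 0600 when writing a new file) and/or G304 (file path built from variable input); the done file is an intentional, non-sensitive marker, so annotating the call is reasonable.
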
@@ -463,7 +463,7 @@ func (c *restoreController) fetchBackupInfo(backupName string, pluginManager cli
 func (c *restoreController) runValidatedRestore(restore *api.Restore, info backupInfo) error {
    // instantiate the per-restore logger that will output both to a temp file
    // (for upload to object storage) and to stdout.
-   restoreLog, err := newRestoreLogger(restore, c.logger, c.restoreLogLevel, c.logFormat)
+   restoreLog, err := newRestoreLogger(restore, c.restoreLogLevel, c.logFormat)
    if err != nil {
        return err
    }

@@ -577,14 +577,14 @@ func (c *restoreController) runValidatedRestore(restore *api.Restore, info backu
        "errors": restoreErrors,
    }

-   if err := putResults(restore, m, info.backupStore, c.logger); err != nil {
+   if err := putResults(restore, m, info.backupStore); err != nil {
        c.logger.WithError(err).Error("Error uploading restore results to backup storage")
    }

    return nil
 }

-func putResults(restore *api.Restore, results map[string]pkgrestore.Result, backupStore persistence.BackupStore, log logrus.FieldLogger) error {
+func putResults(restore *api.Restore, results map[string]pkgrestore.Result, backupStore persistence.BackupStore) error {
    buf := new(bytes.Buffer)
    gzw := gzip.NewWriter(buf)
    defer gzw.Close()

@@ -669,7 +669,7 @@ type restoreLogger struct {
    w *gzip.Writer
 }

-func newRestoreLogger(restore *api.Restore, baseLogger logrus.FieldLogger, logLevel logrus.Level, logFormat logging.Format) (*restoreLogger, error) {
+func newRestoreLogger(restore *api.Restore, logLevel logrus.Level, logFormat logging.Format) (*restoreLogger, error) {
    file, err := ioutil.TempFile("", "")
    if err != nil {
        return nil, errors.Wrap(err, "error creating temp file")

@@ -61,12 +61,12 @@ func IsRunningInNode(ctx context.Context, namespace string, nodeName string, pod
        return errors.Wrap(err, "failed to list daemonset pods")
    }

-   for _, pod := range pods.Items {
-       if kube.IsPodRunning(&pod) != nil {
+   for i := range pods.Items {
+       if kube.IsPodRunning(&pods.Items[i]) != nil {
            continue
        }

-       if pod.Spec.NodeName == nodeName {
+       if pods.Items[i].Spec.NodeName == nodeName {
            return nil
        }
    }

@@ -77,7 +77,7 @@ func (b *clientBuilder) clientConfig() *hcplugin.ClientConfig {
            string(common.PluginKindItemSnapshotter): framework.NewItemSnapshotterPlugin(common.ClientLogger(b.clientLogger)),
        },
        Logger: b.pluginLogger,
-       Cmd:    exec.Command(b.commandName, b.commandArgs...),
+       Cmd:    exec.Command(b.commandName, b.commandArgs...), //nolint
    }
 }

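The bare `//nolint` on `exec.Command` (here and in `Command.Cmd()` below) presumably covers gosec G204, subprocess launched with variable input; launching plugin binaries by their configured name is the entire point of this code path, so the warning is suppressed rather than avoided.
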
@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

+//nolint:gosec
 package config

 import (

@@ -50,18 +50,18 @@ func getSubscriptionID(config map[string]string) string {
    return os.Getenv(subscriptionIDEnvVar)
 }

-func getStorageAccountKey(config map[string]string) (string, *azure.Environment, error) {
+func getStorageAccountKey(config map[string]string) (string, error) {
    credentialsFile := selectCredentialsFile(config)

    if err := loadCredentialsIntoEnv(credentialsFile); err != nil {
-       return "", nil, err
+       return "", err
    }

    // Get Azure cloud from AZURE_CLOUD_NAME, if it exists. If the env var does not
    // exist, parseAzureEnvironment will return azure.PublicCloud.
    env, err := parseAzureEnvironment(os.Getenv(cloudNameEnvVar))
    if err != nil {
-       return "", nil, errors.Wrap(err, "unable to parse azure cloud name environment variable")
+       return "", errors.Wrap(err, "unable to parse azure cloud name environment variable")
    }

    // Get storage key from secret using key config[storageAccountKeyEnvVarConfigKey]. If the config does not

@@ -69,21 +69,21 @@ func getStorageAccountKey(config map[string]string) (string, *azure.Environment,
    if secretKeyEnvVar := config[storageAccountKeyEnvVarConfigKey]; secretKeyEnvVar != "" {
        storageKey := os.Getenv(secretKeyEnvVar)
        if storageKey == "" {
-           return "", env, errors.Errorf("no storage key secret with key %s found", secretKeyEnvVar)
+           return "", errors.Errorf("no storage key secret with key %s found", secretKeyEnvVar)
        }

-       return storageKey, env, nil
+       return storageKey, nil
    }

    // get subscription ID from object store config or AZURE_SUBSCRIPTION_ID environment variable
    subscriptionID := getSubscriptionID(config)
    if subscriptionID == "" {
-       return "", nil, errors.New("azure subscription ID not found in object store's config or in environment variable")
+       return "", errors.New("azure subscription ID not found in object store's config or in environment variable")
    }

    // we need config["resourceGroup"], config["storageAccount"]
-   if _, err := getRequiredValues(mapLookup(config), resourceGroupConfigKey, storageAccountConfigKey); err != nil {
-       return "", env, errors.Wrap(err, "unable to get all required config values")
+   if err := getRequiredValues(mapLookup(config), resourceGroupConfigKey, storageAccountConfigKey); err != nil {
+       return "", errors.Wrap(err, "unable to get all required config values")
    }

    // get authorizer from environment in the following order:

@@ -93,7 +93,7 @@ func getStorageAccountKey(config map[string]string) (string, *azure.Environment,
    // 4. MSI (managed service identity)
    authorizer, err := auth.NewAuthorizerFromEnvironment()
    if err != nil {
-       return "", nil, errors.Wrap(err, "error getting authorizer from environment")
+       return "", errors.Wrap(err, "error getting authorizer from environment")
    }

    // get storageAccountsClient

@@ -103,10 +103,10 @@ func getStorageAccountKey(config map[string]string) (string, *azure.Environment,
    // get storage key
    res, err := storageAccountsClient.ListKeys(context.TODO(), config[resourceGroupConfigKey], config[storageAccountConfigKey], storagemgmt.Kerb)
    if err != nil {
-       return "", env, errors.WithStack(err)
+       return "", errors.WithStack(err)
    }
    if res.Keys == nil || len(*res.Keys) == 0 {
-       return "", env, errors.New("No storage keys found")
+       return "", errors.New("No storage keys found")
    }

    var storageKey string

@@ -120,10 +120,10 @@ func getStorageAccountKey(config map[string]string) (string, *azure.Environment,
    }

    if storageKey == "" {
-       return "", env, errors.New("No storage key with Full permissions found")
+       return "", errors.New("No storage key with Full permissions found")
    }

-   return storageKey, env, nil
+   return storageKey, nil
 }

 func mapLookup(data map[string]string) func(string) string {

@@ -136,12 +136,12 @@ func mapLookup(data map[string]string) func(string) string {
 // relies on (AZURE_ACCOUNT_NAME and AZURE_ACCOUNT_KEY) based
 // on info in the provided object storage location config map.
 func GetAzureResticEnvVars(config map[string]string) (map[string]string, error) {
-   storageAccountKey, _, err := getStorageAccountKey(config)
+   storageAccountKey, err := getStorageAccountKey(config)
    if err != nil {
        return nil, err
    }

-   if _, err := getRequiredValues(mapLookup(config), storageAccountConfigKey); err != nil {
+   if err := getRequiredValues(mapLookup(config), storageAccountConfigKey); err != nil {
        return nil, errors.Wrap(err, "unable to get all required config values")
    }

@@ -191,7 +191,7 @@ func parseAzureEnvironment(cloudName string) (*azure.Environment, error) {
    return &env, errors.WithStack(err)
 }

-func getRequiredValues(getValue func(string) string, keys ...string) (map[string]string, error) {
+func getRequiredValues(getValue func(string) string, keys ...string) error {
    missing := []string{}
    results := map[string]string{}

@@ -204,10 +204,10 @@ func getRequiredValues(getValue func(string) string, keys ...string) (map[string
    }

    if len(missing) > 0 {
-       return nil, errors.Errorf("the following keys do not have values: %s", strings.Join(missing, ", "))
+       return errors.Errorf("the following keys do not have values: %s", strings.Join(missing, ", "))
    }

-   return results, nil
+   return nil
 }

 // GetAzureStorageDomain gets the Azure storage domain required by a Azure blob connection,

@@ -221,7 +221,7 @@ func GetAzureStorageDomain(config map[string]string) string {
 }

 func GetAzureCredentials(config map[string]string) (string, string, error) {
-   storageAccountKey, _, err := getStorageAccountKey(config)
+   storageAccountKey, err := getStorageAccountKey(config)
    if err != nil {
        return "", "", err
    }

@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

+//nolint:gosec
 package config

 import "os"

@@ -14,6 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

+//nolint:gosec
 package keys

 import (

@@ -280,12 +280,12 @@ func (urp *unifiedRepoProvider) DefaultMaintenanceFrequency(ctx context.Context,
 }

 func (urp *unifiedRepoProvider) GetPassword(param interface{}) (string, error) {
-   repoParam, ok := param.(RepoParam)
+   _, ok := param.(RepoParam)
    if !ok {
        return "", errors.Errorf("invalid parameter, expect %T, actual %T", RepoParam{}, param)
    }

-   repoPassword, err := getRepoPassword(urp.credentialGetter.FromSecret, repoParam)
+   repoPassword, err := getRepoPassword(urp.credentialGetter.FromSecret)
    if err != nil {
        return "", errors.Wrap(err, "error to get repo password")
    }

@@ -330,7 +330,7 @@ func (urp *unifiedRepoProvider) GetStoreOptions(param interface{}) (map[string]s
    return storeOptions, nil
 }

-func getRepoPassword(secretStore credentials.SecretStore, param RepoParam) (string, error) {
+func getRepoPassword(secretStore credentials.SecretStore) (string, error) {
    if secretStore == nil {
        return "", errors.New("invalid credentials interface")
    }

@@ -506,7 +506,7 @@ func TestGetRepoPassword(t *testing.T) {
        },
    }

-   password, err := getRepoPassword(urp.credentialGetter.FromSecret, RepoParam{})
+   password, err := getRepoPassword(urp.credentialGetter.FromSecret)

    require.Equal(t, tc.expected, password)

@@ -77,7 +77,7 @@ func (c *Command) String() string {
 // Cmd returns an exec.Cmd for the command.
 func (c *Command) Cmd() *exec.Cmd {
    parts := c.StringSlice()
-   cmd := exec.Command(parts[0], parts[1:]...)
+   cmd := exec.Command(parts[0], parts[1:]...) //nolint
    cmd.Dir = c.Dir

    if len(c.Env) > 0 {

@@ -70,9 +70,9 @@ func (a *AdmissionWebhookConfigurationAction) Execute(input *velero.RestoreItemA
        return velero.NewRestoreItemActionExecuteOutput(input.Item), nil
    }
    newWebhooks := make([]interface{}, 0)
-   for i, entry := range webhooks {
+   for i := range webhooks {
        logger2 := logger.WithField("index", i)
-       obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&entry)
+       obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&webhooks[i])
        if err != nil {
            logger2.Errorf("failed to convert the webhook entry, error: %v, it will be dropped", err)
            continue

@@ -1523,7 +1523,7 @@ func shouldRenamePV(ctx *restoreContext, obj *unstructured.Unstructured, client
 // remapClaimRefNS remaps a PersistentVolume's claimRef.Namespace based on a
 // restore's NamespaceMappings, if necessary. Returns true if the namespace was
 // remapped, false if it was not required.
-func remapClaimRefNS(ctx *restoreContext, obj *unstructured.Unstructured) (bool, error) {
+func remapClaimRefNS(ctx *restoreContext, obj *unstructured.Unstructured) (bool, error) { //nolint:unparam
    if len(ctx.restore.Spec.NamespaceMapping) == 0 {
        ctx.log.Debug("Persistent volume does not need to have the claimRef.namespace remapped because restore is not remapping any namespaces")
        return false, nil

@@ -123,9 +123,9 @@ func EnsureNamespaceExistsAndIsReady(namespace *corev1api.Namespace, client core
 func GetVolumeDirectory(ctx context.Context, log logrus.FieldLogger, pod *corev1api.Pod, volumeName string, cli client.Client) (string, error) {
    var volume *corev1api.Volume

-   for _, item := range pod.Spec.Volumes {
-       if item.Name == volumeName {
-           volume = &item
+   for i := range pod.Spec.Volumes {
+       if pod.Spec.Volumes[i].Name == volumeName {
+           volume = &pod.Spec.Volumes[i]
            break
        }
    }
