diff --git a/Makefile b/Makefile
index 3884063c9..053e0331e 100644
--- a/Makefile
+++ b/Makefile
@@ -366,4 +366,4 @@ gen-docs:
 
 .PHONY: test-e2e
 test-e2e: local
-	$(MAKE) -C test/e2e run
+	$(MAKE) -e VERSION=$(VERSION) -C test/e2e run
diff --git a/changelogs/unreleased/4058-danfengliu b/changelogs/unreleased/4058-danfengliu
new file mode 100644
index 000000000..b1cdb0245
--- /dev/null
+++ b/changelogs/unreleased/4058-danfengliu
@@ -0,0 +1 @@
+Add upgrade test to E2E tests
\ No newline at end of file
diff --git a/test/e2e/Makefile b/test/e2e/Makefile
index 6f7140463..b6f6aaf78 100644
--- a/test/e2e/Makefile
+++ b/test/e2e/Makefile
@@ -48,7 +48,11 @@ OUTPUT_DIR := _output/$(GOOS)/$(GOARCH)/bin
 GINKGO_FOCUS ?=
 VELERO_CLI ?=$$(pwd)/../../_output/bin/$(GOOS)/$(GOARCH)/velero
 VELERO_IMAGE ?= velero/velero:main
+VELERO_VERSION ?= $(VERSION)
 RESTIC_HELPER_IMAGE ?=
+# Released versions only
+UPGRADE_FROM_VELERO_CLI ?=
+UPGRADE_FROM_VELERO_VERSION ?= v1.6.3
 CRDS_VERSION ?= v1
 VELERO_NAMESPACE ?= velero
 CREDS_FILE ?=
@@ -79,10 +83,13 @@ run: ginkgo
 	@[ "${BSL_BUCKET}" ] && echo "Using bucket ${BSL_BUCKET} to store backups from E2E tests" || \
 		(echo "Bucket to store the backups from E2E tests is required, please re-run with BSL_BUCKET="; exit 1 )
 	@[ "${CLOUD_PROVIDER}" ] && echo "Using cloud provider ${CLOUD_PROVIDER}" || \
-		(echo "Cloud provider for target cloud/plug-in provider is required, please rerun with CLOUD_PROVIDER="; exit 1)
+		(echo "Cloud provider for target cloud/plug-in provider is required, please rerun with CLOUD_PROVIDER="; exit 1)
	@$(GINKGO) -v -focus="$(GINKGO_FOCUS)" . -- -velerocli=$(VELERO_CLI) \
 		-velero-image=$(VELERO_IMAGE) \
+		-velero-version=$(VELERO_VERSION) \
 		-restic-helper-image=$(RESTIC_HELPER_IMAGE) \
+		-upgrade-from-velero-cli=$(UPGRADE_FROM_VELERO_CLI) \
+		-upgrade-from-velero-version=$(UPGRADE_FROM_VELERO_VERSION) \
 		-velero-namespace=$(VELERO_NAMESPACE) \
 		-crds-version=$(CRDS_VERSION) \
 		-credentials-file=$(CREDS_FILE) \
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
index 8cc3dfe96..81d41f790 100644
--- a/test/e2e/e2e_suite_test.go
+++ b/test/e2e/e2e_suite_test.go
@@ -26,9 +26,10 @@ import (
 )
 
 var (
-	veleroCLI, veleroImage, cloudCredentialsFile, bslConfig, bslBucket, bslPrefix, vslConfig, cloudProvider, objectStoreProvider, veleroNamespace, crdsVersion string
-	additionalBSLProvider, additionalBSLBucket, additionalBSLPrefix, additionalBSLConfig, additionalBSLCredentials, registryCredentialFile, resticHelperImage string
-	installVelero bool
+	veleroCLI, veleroImage, veleroVersion, cloudCredentialsFile, bslConfig, bslBucket, bslPrefix, vslConfig, cloudProvider, objectStoreProvider, veleroNamespace, crdsVersion string
+	additionalBSLProvider, additionalBSLBucket, additionalBSLPrefix, additionalBSLConfig, additionalBSLCredentials, registryCredentialFile, resticHelperImage string
+	upgradeFromVeleroVersion, upgradeFromVeleroCLI string
+	installVelero bool
 )
 
 func init() {
@@ -38,7 +39,10 @@ func init() {
 	flag.StringVar(&cloudCredentialsFile, "credentials-file", "", "file containing credentials for backup and volume provider. Required.")
 	flag.StringVar(&veleroCLI, "velerocli", "velero", "path to the velero application to use.")
 	flag.StringVar(&veleroImage, "velero-image", "velero/velero:main", "image for the velero server to be tested.")
+	flag.StringVar(&veleroVersion, "velero-version", "main", "version of the velero server to be tested.")
 	flag.StringVar(&resticHelperImage, "restic-helper-image", "", "image for the velero restic restore helper to be tested.")
+	flag.StringVar(&upgradeFromVeleroCLI, "upgrade-from-velero-cli", "", "path to the pre-upgrade velero application to use.")
+	flag.StringVar(&upgradeFromVeleroVersion, "upgrade-from-velero-version", "v1.6.3", "version of the pre-upgrade velero server to upgrade from.")
 	flag.StringVar(&bslConfig, "bsl-config", "", "configuration to use for the backup storage location. Format is key1=value1,key2=value2")
 	flag.StringVar(&bslPrefix, "prefix", "", "prefix under which all Velero data should be stored within the bucket. Optional.")
 	flag.StringVar(&vslConfig, "vsl-config", "", "configuration to use for the volume snapshot location. Format is key1=value1,key2=value2")
diff --git a/test/e2e/install.go b/test/e2e/install.go
index 61dbce07f..7dc7a63a8 100644
--- a/test/e2e/install.go
+++ b/test/e2e/install.go
@@ -302,8 +302,10 @@ func toUnstructured(res interface{}) (unstructured.Unstructured, error) {
 
 func waitVeleroReady(ctx context.Context, namespace string, useRestic bool) error {
 	fmt.Println("Waiting for Velero deployment to be ready.")
-	stdout, stderr, err := velerexec.RunCommand(exec.CommandContext(ctx, "kubectl", "wait", "--for=condition=available",
-		"deployment/velero", "-n", namespace, "--timeout=600s"))
+	// When upgrading via "kubectl apply", "kubectl wait --for=condition=available deployment/velero -n velero --timeout=600s" returns immediately,
+	// since the deployment stays available during the rolling update; use "kubectl rollout status" instead, which blocks until the rollout completes. See https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#complete-deployment
+	stdout, stderr, err := velerexec.RunCommand(exec.CommandContext(ctx, "kubectl", "rollout", "status",
+		"deployment/velero", "-n", namespace))
 	if err != nil {
 		return errors.Wrapf(err, "fail to wait for the velero deployment ready, stdout=%s, stderr=%s", stdout, stderr)
 	}
diff --git a/test/e2e/kibishii_tests.go b/test/e2e/kibishii_utils.go
similarity index 90%
rename from test/e2e/kibishii_tests.go
rename to test/e2e/kibishii_utils.go
index d15893d72..e4e962c53 100644
--- a/test/e2e/kibishii_tests.go
+++ b/test/e2e/kibishii_utils.go
@@ -33,6 +33,55 @@ const (
 	jumpPadPod = "jump-pad"
 )
 
+// runKibishiiTests runs kibishii tests on the provider.
+func runKibishiiTests(client testClient, providerName, veleroCLI, veleroNamespace, backupName, restoreName, backupLocation string,
+	useVolumeSnapshots bool, registryCredentialFile string) error {
+	oneHourTimeout, _ := context.WithTimeout(context.Background(), time.Minute*60)
+
+	if err := createNamespace(oneHourTimeout, client, kibishiiNamespace); err != nil {
+		return errors.Wrapf(err, "Failed to create namespace %s to install Kibishii workload", kibishiiNamespace)
+	}
+	defer func() {
+		// use a separate context from "oneHourTimeout": if the test itself times out, the deferred cleanup still needs a live context
+		if err := deleteNamespace(context.Background(), client, kibishiiNamespace, true); err != nil {
+			fmt.Println(errors.Wrapf(err, "failed to delete the namespace %q", kibishiiNamespace))
+		}
+	}()
+	if err := kibishiiPrepareBeforeBackup(oneHourTimeout, client, providerName, kibishiiNamespace, registryCredentialFile); err != nil {
+		return errors.Wrapf(err, "Failed to install and prepare data for kibishii %s", kibishiiNamespace)
+	}
+
+	if err := veleroBackupNamespace(oneHourTimeout, veleroCLI, veleroNamespace, backupName, kibishiiNamespace, backupLocation, useVolumeSnapshots); err != nil {
+		veleroBackupLogs(oneHourTimeout, veleroCLI, veleroNamespace, backupName)
+		return errors.Wrapf(err, "Failed to backup kibishii namespace %s", kibishiiNamespace)
+	}
+
+	if providerName == "vsphere" && useVolumeSnapshots {
+		// Wait for uploads started by the Velero Plug-in for vSphere to complete
+		// TODO - remove after upload progress monitoring is implemented
+		fmt.Println("Waiting for vSphere uploads to complete")
+		if err := waitForVSphereUploadCompletion(oneHourTimeout, time.Hour, kibishiiNamespace); err != nil {
+			return errors.Wrapf(err, "Error waiting for uploads to complete")
+		}
+	}
+	fmt.Printf("Simulating a disaster by removing namespace %s\n", kibishiiNamespace)
+	if err := deleteNamespace(oneHourTimeout, client, kibishiiNamespace, true); err != nil {
+		return errors.Wrapf(err, "failed to delete namespace %s", kibishiiNamespace)
+	}
+
+	if err := veleroRestore(oneHourTimeout, veleroCLI, veleroNamespace, restoreName, backupName); err != nil {
+		veleroRestoreLogs(oneHourTimeout, veleroCLI, veleroNamespace, restoreName)
+		return errors.Wrapf(err, "Restore %s failed from backup %s", restoreName, backupName)
+	}
+
+	if err := kibishiiVerifyAfterRestore(client, kibishiiNamespace, oneHourTimeout); err != nil {
+		return errors.Wrapf(err, "Error verifying kibishii after restore")
+	}
+
+	fmt.Printf("kibishii test completed successfully\n")
+	return nil
+}
+
 func installKibishii(ctx context.Context, namespace string, cloudPlatform string) error {
 	// We use kustomize to generate YAML for Kibishii from the checked-in yaml directories
 	kibishiiInstallCmd := exec.CommandContext(ctx, "kubectl", "apply", "-n", namespace, "-k",
@@ -88,20 +137,12 @@ func verifyData(ctx context.Context, namespace string, levels int, filesPerLevel
 	return nil
 }
 
-// runKibishiiTests runs kibishii tests on the provider.
-func runKibishiiTests(client testClient, providerName, veleroCLI, veleroNamespace, backupName, restoreName, backupLocation string,
-	useVolumeSnapshots bool, registryCredentialFile string) error {
-	oneHourTimeout, _ := context.WithTimeout(context.Background(), time.Minute*60)
+func waitForKibishiiPods(ctx context.Context, client testClient, kibishiiNamespace string) error {
+	return waitForPods(ctx, client, kibishiiNamespace, []string{"jump-pad", "etcd0", "etcd1", "etcd2", "kibishii-deployment-0", "kibishii-deployment-1"})
+}
+
+func kibishiiPrepareBeforeBackup(oneHourTimeout context.Context, client testClient, providerName, kibishiiNamespace, registryCredentialFile string) error {
 	serviceAccountName := "default"
-	if err := createNamespace(oneHourTimeout, client, kibishiiNamespace); err != nil {
-		return errors.Wrapf(err, "Failed to create namespace %s to install Kibishii workload", kibishiiNamespace)
-	}
-	defer func() {
-		// if other functions runs timeout, the defer has no change to run, so use a separated context rather than the "oneHourTimeout" to avoid this
-		if err := deleteNamespace(context.Background(), client, kibishiiNamespace, true); err != nil {
-			fmt.Println(errors.Wrapf(err, "failed to delete the namespace %q", kibishiiNamespace))
-		}
-	}()
 
 	// wait until the service account is created before patch the image pull secret
 	if err := waitUntilServiceAccountCreated(oneHourTimeout, client, kibishiiNamespace, serviceAccountName, 10*time.Minute); err != nil {
@@ -126,37 +167,10 @@
 	if err := generateData(oneHourTimeout, kibishiiNamespace, 2, 10, 10, 1024, 1024, 0, 2); err != nil {
 		return errors.Wrap(err, "Failed to generate data")
 	}
+	return nil
+}
-
-	if err := veleroBackupNamespace(oneHourTimeout, veleroCLI, veleroNamespace, backupName, kibishiiNamespace, backupLocation, useVolumeSnapshots); err != nil {
-		veleroBackupLogs(oneHourTimeout, veleroCLI, veleroNamespace, backupName)
-		return errors.Wrapf(err, "Failed to backup kibishii namespace %s", kibishiiNamespace)
-	}
-
-	if providerName == "vsphere" && useVolumeSnapshots {
-		// Wait for uploads started by the Velero Plug-in for vSphere to complete
-		// TODO - remove after upload progress monitoring is implemented
-		fmt.Println("Waiting for vSphere uploads to complete")
-		if err := waitForVSphereUploadCompletion(oneHourTimeout, time.Hour, kibishiiNamespace); err != nil {
-			return errors.Wrapf(err, "Error waiting for uploads to complete")
-		}
-	}
-	fmt.Printf("Simulating a disaster by removing namespace %s\n", kibishiiNamespace)
-	if err := deleteNamespace(oneHourTimeout, client, kibishiiNamespace, true); err != nil {
-		return errors.Wrapf(err, "failed to delete namespace %s", kibishiiNamespace)
-	}
-
-	// the snapshots of AWS may be still in pending status when do the restore, wait for a while
-	// to avoid this https://github.com/vmware-tanzu/velero/issues/1799
-	// TODO remove this after https://github.com/vmware-tanzu/velero/issues/3533 is fixed
-	if providerName == "aws" && useVolumeSnapshots {
-		fmt.Println("Waiting 5 minutes to make sure the snapshots are ready...")
-		time.Sleep(5 * time.Minute)
-	}
-	if err := veleroRestore(oneHourTimeout, veleroCLI, veleroNamespace, restoreName, backupName); err != nil {
-		veleroRestoreLogs(oneHourTimeout, veleroCLI, veleroNamespace, restoreName)
-		return errors.Wrapf(err, "Restore %s failed from backup %s", restoreName, backupName)
-	}
-
+
+func kibishiiVerifyAfterRestore(client testClient, kibishiiNamespace string, oneHourTimeout context.Context) error {
 	// wait for kibishii pod startup
 	// TODO - Fix kibishii so we can check that it is ready to go
 	fmt.Printf("Waiting for kibishii pods to be ready\n")
@@ -169,11 +183,5 @@ func runKibishiiTests(client testClient, providerName, veleroCLI, veleroNamespac
 	if err := verifyData(oneHourTimeout, kibishiiNamespace, 2, 10, 10, 1024, 1024, 0, 2); err != nil {
 		return errors.Wrap(err, "Failed to verify data generated by kibishii")
 	}
-
-	fmt.Printf("kibishii test completed successfully\n")
 	return nil
 }
-
-func waitForKibishiiPods(ctx context.Context, client testClient, kibishiiNamespace string) error {
-	return waitForPods(ctx, client, kibishiiNamespace, []string{"jump-pad", "etcd0", "etcd1", "etcd2", "kibishii-deployment-0", "kibishii-deployment-1"})
-}
diff --git a/test/e2e/upgrade_test.go b/test/e2e/upgrade_test.go
new file mode 100644
index 000000000..5eb80d108
--- /dev/null
+++ b/test/e2e/upgrade_test.go
@@ -0,0 +1,155 @@
+/*
+Copyright the Velero contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package e2e
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"time"
+
+	"github.com/google/uuid"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"github.com/pkg/errors"
+)
+
+const (
+	upgradeNamespace = "upgrade-workload"
+)
+
+// Upgrade tests using Kibishii as the sample workload
+var _ = Describe("[Upgrade][Restic] Velero upgrade tests on cluster using the plugin provider for object storage and Restic for volume backups", backup_upgrade_restore_with_restic)
+
+var _ = Describe("[Upgrade][Snapshot] Velero upgrade tests on cluster using the plugin provider for object storage and snapshots for volume backups", backup_upgrade_restore_with_snapshots)
+
+func backup_upgrade_restore_with_snapshots() {
+	backup_upgrade_restore_test(true)
+}
+
+func backup_upgrade_restore_with_restic() {
+	backup_upgrade_restore_test(false)
+}
+
+func backup_upgrade_restore_test(useVolumeSnapshots bool) {
+	var (
+		backupName, restoreName string
+	)
+	// shadow the package-level variable so a CLI downloaded in BeforeEach does not overwrite the flag value
+	upgradeFromVeleroCLI := upgradeFromVeleroCLI
+
+	client, err := newTestClient()
+	Expect(err).To(Succeed(), "Failed to instantiate cluster client for backup tests")
+
+	BeforeEach(func() {
+		if len(upgradeFromVeleroVersion) == 0 {
+			Skip("A pre-upgrade Velero version is required to run the upgrade test, please rerun with upgrade-from-velero-version=")
+		}
+		if useVolumeSnapshots && cloudProvider == "kind" {
+			Skip("Volume snapshots not supported on kind")
+		}
+		// Assume the tag of the Velero server image is identical to the Velero CLI version.
+		// If no pre-upgrade CLI path was given, download the released CLI matching that version.
+		if len(upgradeFromVeleroCLI) == 0 {
+			upgradeFromVeleroCLI, err = installVeleroCLI(upgradeFromVeleroVersion)
+			Expect(err).To(Succeed())
+		}
+
+		var err error
+		flag.Parse()
+		uuidgen, err = uuid.NewRandom()
+		Expect(err).To(Succeed())
+		if installVelero {
+			// Pass blank veleroImage and resticHelperImage so that the pre-upgrade CLI
+			// falls back to its own default images.
+			Expect(veleroInstall(context.Background(), upgradeFromVeleroCLI, "", "", veleroNamespace, cloudProvider, objectStoreProvider, useVolumeSnapshots,
+				cloudCredentialsFile, bslBucket, bslPrefix, bslConfig, vslConfig, "", "", registryCredentialFile)).To(Succeed())
+			Expect(checkVeleroVersion(context.Background(), upgradeFromVeleroCLI, upgradeFromVeleroVersion)).To(Succeed())
+		} else {
+			Skip("Upgrade test is skipped since installing Velero is disabled")
+		}
+	})
+
+	AfterEach(func() {
+		if installVelero {
+			err = veleroUninstall(context.Background(), veleroCLI, veleroNamespace)
+			Expect(err).To(Succeed())
+		}
+	})
+
+	When("kibishii is the sample workload", func() {
+		It("should be successfully backed up and restored to the default BackupStorageLocation", func() {
+			backupName = "backup-" + uuidgen.String()
+			restoreName = "restore-" + uuidgen.String()
+			Expect(runUpgradeTests(client, veleroImage, veleroVersion, cloudProvider, upgradeFromVeleroCLI, veleroNamespace, backupName, restoreName, "", useVolumeSnapshots, registryCredentialFile)).To(Succeed(),
+				"Failed to successfully backup and restore Kibishii namespace")
+		})
+	})
+}
+
+// runUpgradeTests runs the upgrade test on the provider using kibishii.
+func runUpgradeTests(client testClient, upgradeToVeleroImage, upgradeToVeleroVersion, providerName, upgradeFromVeleroCLI, veleroNamespace, backupName, restoreName, backupLocation string,
+	useVolumeSnapshots bool, registryCredentialFile string) error {
+	oneHourTimeout, _ := context.WithTimeout(context.Background(), time.Minute*60)
+	if err := createNamespace(oneHourTimeout, client, upgradeNamespace); err != nil {
+		return errors.Wrapf(err, "Failed to create namespace %s to install Kibishii workload", upgradeNamespace)
+	}
+	defer func() {
+		// use a separate context from "oneHourTimeout": if the test itself times out, the deferred cleanup still needs a live context
+		if err := deleteNamespace(context.Background(), client, upgradeNamespace, true); err != nil {
+			fmt.Println(errors.Wrapf(err, "failed to delete the namespace %q", upgradeNamespace))
+		}
+	}()
+	if err := kibishiiPrepareBeforeBackup(oneHourTimeout, client, providerName, upgradeNamespace, registryCredentialFile); err != nil {
+		return errors.Wrapf(err, "Failed to install and prepare data for kibishii %s", upgradeNamespace)
+	}
+
+	if err := veleroBackupNamespace(oneHourTimeout, upgradeFromVeleroCLI, veleroNamespace, backupName, upgradeNamespace, backupLocation, useVolumeSnapshots); err != nil {
+		veleroBackupLogs(oneHourTimeout, upgradeFromVeleroCLI, veleroNamespace, backupName)
+		return errors.Wrapf(err, "Failed to backup kibishii namespace %s", upgradeNamespace)
+	}
+
+	if providerName == "vsphere" && useVolumeSnapshots {
+		// Wait for uploads started by the Velero Plug-in for vSphere to complete
+		// TODO - remove after upload progress monitoring is implemented
+		fmt.Println("Waiting for vSphere uploads to complete")
+		if err := waitForVSphereUploadCompletion(oneHourTimeout, time.Hour, upgradeNamespace); err != nil {
+			return errors.Wrapf(err, "Error waiting for uploads to complete")
+		}
+	}
+	fmt.Printf("Simulating a disaster by removing namespace %s\n", upgradeNamespace)
+	if err := deleteNamespace(oneHourTimeout, client, upgradeNamespace, true); err != nil {
+		return errors.Wrapf(err, "failed to delete namespace %s", upgradeNamespace)
+	}
+	if err := veleroInstall(context.Background(), veleroCLI, upgradeToVeleroImage, resticHelperImage, veleroNamespace, cloudProvider, objectStoreProvider, useVolumeSnapshots,
+		cloudCredentialsFile, bslBucket, bslPrefix, bslConfig, vslConfig, crdsVersion, "", registryCredentialFile); err != nil {
+		return errors.Wrapf(err, "Failed to install velero from image %s", upgradeToVeleroImage)
+	}
+	if err := checkVeleroVersion(context.Background(), veleroCLI, upgradeToVeleroVersion); err != nil {
+		return errors.Wrap(err, "Velero install version mismatch")
+	}
+	if err := veleroRestore(oneHourTimeout, veleroCLI, veleroNamespace, restoreName, backupName); err != nil {
+		veleroRestoreLogs(oneHourTimeout, veleroCLI, veleroNamespace, restoreName)
+		return errors.Wrapf(err, "Restore %s failed from backup %s", restoreName, backupName)
+	}
+
+	if err := kibishiiVerifyAfterRestore(client, upgradeNamespace, oneHourTimeout); err != nil {
+		return errors.Wrapf(err, "Error verifying kibishii after restore")
+	}
+
+	fmt.Printf("Upgrade test completed successfully\n")
+	return nil
+}
diff --git a/test/e2e/velero_utils.go b/test/e2e/velero_utils.go
index f19841643..fd16edcea 100644
--- a/test/e2e/velero_utils.go
+++ b/test/e2e/velero_utils.go
@@ -22,9 +22,13 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"io/ioutil"
+	"net/http"
 	"os"
 	"os/exec"
 	"path/filepath"
+	"regexp"
+	"runtime"
 	"strings"
 	"time"
 
@@ -393,3 +397,89 @@ func waitForVSphereUploadCompletion(ctx context.Context, timeout time.Duration,
 
 	return err
 }
+
+func getVeleroVersion(ctx context.Context, veleroCLI string) (string, error) {
+	cmd := exec.CommandContext(ctx, veleroCLI, "version", "--timeout", "60s")
+	fmt.Println("Get Version Command:" + cmd.String())
+	stdout, stderr, err := veleroexec.RunCommand(cmd)
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to get velero version, stdout=%s, stderr=%s", stdout, stderr)
+	}
+
+	output := strings.Replace(stdout, "\n", " ", -1)
+	fmt.Println("Version:" + output)
+	// Match "Client: ... Version: vX.Y.Z ... Server: ... Version: vX.Y.Z" in the flattened output
+	regCompiler := regexp.MustCompile(`(?i)client\s*:\s*version\s*:\s*(\S+).+server\s*:\s*version\s*:\s*(\S+)`)
+	versionMatches := regCompiler.FindStringSubmatch(output)
+	if len(versionMatches) < 3 {
+		return "", errors.New("failed to parse velero version from the command output")
+	}
+	if versionMatches[1] != versionMatches[2] {
+		return "", errors.New("velero server and client versions do not match")
+	}
+	return versionMatches[1], nil
+}
+
+func checkVeleroVersion(ctx context.Context, veleroCLI string, expectedVer string) error {
+	tag := expectedVer
+	tagInstalled, err := getVeleroVersion(ctx, veleroCLI)
+	if err != nil {
+		return errors.WithMessage(err, "Failed to get Velero version")
+	}
+	if strings.Trim(tag, " ") != strings.Trim(tagInstalled, " ") {
+		return errors.Errorf("Installed Velero version %s does not match the expected version %s", tagInstalled, tag)
+	}
+	fmt.Printf("Installed Velero version %s matches the expected version %s\n", tagInstalled, tag)
+	return nil
+}
+
+func installVeleroCLI(version string) (string, error) {
+	name := "velero-" + version + "-" + runtime.GOOS + "-" + runtime.GOARCH
+	postfix := ".tar.gz"
+	tarball := name + postfix
+	tempFile, err := getVeleroCliTarball("https://github.com/vmware-tanzu/velero/releases/download/" + version + "/" + tarball)
+	if err != nil {
+		return "", errors.WithMessage(err, "Failed to get Velero CLI tarball")
+	}
+	tempVeleroCliDir, err := ioutil.TempDir("", "velero-test")
+	if err != nil {
+		return "", errors.WithMessage(err, "Failed to create temp dir for tarball extraction")
+	}
+
+	cmd := exec.Command("tar", "-xvf", tempFile.Name(), "-C", tempVeleroCliDir)
+	defer os.Remove(tempFile.Name())
+
+	if _, err := cmd.Output(); err != nil {
+		return "", errors.WithMessage(err, "Failed to extract file from velero CLI tarball")
+	}
+	return tempVeleroCliDir + "/" + name + "/velero", nil
+}
+
+func getVeleroCliTarball(cliTarballUrl string) (*os.File, error) {
+	lastInd := strings.LastIndex(cliTarballUrl, "/")
+	tarball := cliTarballUrl[lastInd+1:]
+
+	resp, err := http.Get(cliTarballUrl)
+	if err != nil {
+		return nil, errors.WithMessage(err, "Failed to access Velero CLI tarball")
+	}
+	defer resp.Body.Close()
+	// http.Get does not return an error on non-2xx responses, so check the status explicitly
+	if resp.StatusCode != http.StatusOK {
+		return nil, errors.Errorf("Failed to download tarball %s: %s", tarball, resp.Status)
+	}
+
+	tarballBuf, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, errors.WithMessagef(err, "Failed to read buffer for tarball %s.", tarball)
+	}
+	tmpfile, err := ioutil.TempFile("", tarball)
+	if err != nil {
+		return nil, errors.WithMessagef(err, "Failed to create temp file for tarball %s locally.", tarball)
+	}
+
+	if _, err := tmpfile.Write(tarballBuf); err != nil {
+		return nil, errors.WithMessagef(err, "Failed to write tarball file %s locally.", tarball)
+	}
+
+	return tmpfile, nil
+}
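Reviewer note: the version-parsing regex added in getVeleroVersion can be sanity-checked in isolation. The following is a minimal standalone sketch using the same pattern as the diff; the sample string only illustrates the shape of `velero version` output (real output carries additional fields such as the Git commit) and is not captured from an actual run:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Illustrative `velero version` output; real output includes extra lines (Git commit, etc.).
	sample := "Client:\n\tVersion: v1.6.3\nServer:\n\tVersion: v1.6.3\n"

	// Flatten newlines to spaces, exactly as getVeleroVersion does before matching.
	output := strings.Replace(sample, "\n", " ", -1)

	// Same pattern as in velero_utils.go: capture the client and the server version.
	re := regexp.MustCompile(`(?i)client\s*:\s*version\s*:\s*(\S+).+server\s*:\s*version\s*:\s*(\S+)`)
	m := re.FindStringSubmatch(output)
	if len(m) < 3 {
		fmt.Println("failed to parse velero version")
		return
	}
	// Prints: client=v1.6.3 server=v1.6.3 match=true
	fmt.Printf("client=%s server=%s match=%v\n", m[1], m[2], m[1] == m[2])
}
```

With the new Makefile variables in place, the upgrade suite can then be selected with something like `make -C test/e2e run GINKGO_FOCUS=Upgrade UPGRADE_FROM_VELERO_VERSION=v1.6.3`, alongside the usual `BSL_BUCKET`, `CLOUD_PROVIDER`, and `CREDS_FILE` settings.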