Enable the E2E test on GitHub Actions

1. Run the E2E test with kind (provisioning various versions of Kubernetes clusters) and MinIO on GitHub Actions.
2. Bug fix: the variable "stdoutBuf" was assigned to both "installPluginCmd.Stdout" and "installPluginCmd.Stderr", which caused 'if !strings.Contains(stderrBuf.String(), "Duplicate value")' to have no effect, since "stderrBuf.String()" was always empty.
3. Print the stdout and stderr for easier debugging.

Signed-off-by: Wenkai Yin(尹文开) <yinw@vmware.com>
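Item 2 is the classic aliased-buffer mistake. A minimal standalone sketch (not Velero code; the failing ls call is just a stand-in) of why wiring both streams into one buffer makes a stderr check dead, and how separate buffers fix it:

package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	var stdoutBuf, stderrBuf bytes.Buffer

	// Stand-in command that writes to stderr; any failing command works.
	cmd := exec.Command("ls", "/no/such/path")

	// The bug fixed by this commit, in isolation: wiring BOTH streams to
	// stdoutBuf leaves stderrBuf permanently empty, so any check against
	// stderrBuf.String() is dead code.
	//   cmd.Stdout = &stdoutBuf
	//   cmd.Stderr = &stdoutBuf // wrong: stderr ends up in stdoutBuf
	// The fix: give each stream its own buffer.
	cmd.Stdout = &stdoutBuf
	cmd.Stderr = &stderrBuf

	err := cmd.Run()
	fmt.Printf("stdout=%q stderr=%q err=%v\n", stdoutBuf.String(), stderrBuf.String(), err)

	// Content checks on stderr now actually see the error text.
	if err != nil && strings.Contains(stderrBuf.String(), "No such file") {
		fmt.Println("got the expected stderr message")
	}
}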
.github/workflows/e2e-test-kind.yaml (new file, 113 lines, vendored)
@@ -0,0 +1,113 @@
+name: "Run the E2E test on kind"
+on:
+  push:
+  pull_request:
+    # Do not run when the change only includes these directories.
+    paths-ignore:
+      - "site/**"
+      - "design/**"
+jobs:
+  # Build the Velero CLI and image once for all Kubernetes versions, and cache them so the fan-out workers can get them.
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      # Look for a CLI that's made for this PR
+      - name: Fetch built CLI
+        id: cli-cache
+        uses: actions/cache@v2
+        with:
+          path: ./_output/bin/linux/amd64/velero
+          # The cache key is a combination of the current PR number and the commit SHA
+          key: velero-cli-${{ github.event.pull_request.number }}-${{ github.sha }}
+      - name: Fetch built image
+        id: image-cache
+        uses: actions/cache@v2
+        with:
+          path: ./velero.tar
+          # The cache key is a combination of the current PR number and the commit SHA
+          key: velero-image-${{ github.event.pull_request.number }}-${{ github.sha }}
+      - name: Fetch cached go modules
+        uses: actions/cache@v2
+        if: steps.cli-cache.outputs.cache-hit != 'true'
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+      - name: Check out the code
+        uses: actions/checkout@v2
+        if: steps.cli-cache.outputs.cache-hit != 'true' || steps.image-cache.outputs.cache-hit != 'true'
+      # If no binaries were built for this PR, build them now.
+      - name: Build Velero CLI
+        if: steps.cli-cache.outputs.cache-hit != 'true'
+        run: |
+          make local
+      # If no image was built for this PR, build it now.
+      - name: Build Velero Image
+        if: steps.image-cache.outputs.cache-hit != 'true'
+        run: |
+          IMAGE=velero VERSION=pr-test make container
+          docker save velero:pr-test -o ./velero.tar
+  # Run the E2E test against all Kubernetes versions on kind
+  run-e2e-test:
+    needs: build
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        k8s:
+          # 1.15 is not covered, as it doesn't support the "apiextensions.k8s.io/v1" API that the test case needs
+          #- 1.15.12
+          - 1.16.15
+          - 1.17.17
+          - 1.18.15
+          - 1.19.7
+          - 1.20.2
+          - 1.21.1
+      fail-fast: false
+    steps:
+      - name: Check out the code
+        uses: actions/checkout@v2
+      - name: Install MinIO
+        run:
+          docker run -d --rm -p 9000:9000 -e "MINIO_ACCESS_KEY=minio" -e "MINIO_SECRET_KEY=minio123" -e "MINIO_DEFAULT_BUCKETS=bucket,additional-bucket" bitnami/minio:2021.6.17-debian-10-r7
+      - uses: engineerd/setup-kind@v0.5.0
+        with:
+          version: "v0.11.1"
+          image: "kindest/node:v${{ matrix.k8s }}"
+      - name: Fetch built CLI
+        id: cli-cache
+        uses: actions/cache@v2
+        with:
+          path: ./_output/bin/linux/amd64/velero
+          key: velero-cli-${{ github.event.pull_request.number }}-${{ github.sha }}
+      - name: Fetch built Image
+        id: image-cache
+        uses: actions/cache@v2
+        with:
+          path: ./velero.tar
+          key: velero-image-${{ github.event.pull_request.number }}-${{ github.sha }}
+      - name: Load Velero Image
+        run:
+          kind load image-archive velero.tar
+      # Always try to fetch the cached go modules, as the E2E test needs them too
+      - name: Fetch cached go modules
+        uses: actions/cache@v2
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+      - name: Run E2E test
+        run: |
+          cat << EOF > /tmp/credential
+          [default]
+          aws_access_key_id=minio
+          aws_secret_access_key=minio123
+          EOF
+          GOPATH=~/go CLOUD_PROVIDER=kind \
+          OBJECT_STORE_PROVIDER=aws BSL_CONFIG=region=minio,s3ForcePathStyle="true",s3Url=http://$(hostname -i):9000 \
+          CREDS_FILE=/tmp/credential BSL_BUCKET=bucket \
+          ADDITIONAL_OBJECT_STORE_PROVIDER=aws ADDITIONAL_BSL_CONFIG=region=minio,s3ForcePathStyle="true",s3Url=http://$(hostname -i):9000 \
+          ADDITIONAL_CREDS_FILE=/tmp/credential ADDITIONAL_BSL_BUCKET=additional-bucket \
+          VELERO_IMAGE=velero:pr-test \
+          make -C test/e2e run
changelogs/unreleased/3912-ywk253100 (new file, 1 line)
@@ -0,0 +1 @@
+Run the E2E test with kind(provision various versions of k8s cluster) and MinIO on Github Action
@@ -18,7 +18,6 @@ package e2e

 import (
 	"fmt"
-	"os"
 	"os/exec"
 	"strconv"
 	"time"
@@ -40,29 +39,23 @@ func installKibishii(ctx context.Context, namespace string, cloudPlatform string
 	// We use kustomize to generate YAML for Kibishii from the checked-in yaml directories
 	kibishiiInstallCmd := exec.CommandContext(ctx, "kubectl", "apply", "-n", namespace, "-k",
 		"github.com/vmware-tanzu-experiments/distributed-data-generator/kubernetes/yaml/"+cloudPlatform)

-	_, _, err := veleroexec.RunCommand(kibishiiInstallCmd)
+	_, stderr, err := veleroexec.RunCommand(kibishiiInstallCmd)
 	if err != nil {
-		return errors.Wrap(err, "failed to install kibishii")
+		return errors.Wrapf(err, "failed to install kibishii, stderr=%s", stderr)
 	}

 	kibishiiSetWaitCmd := exec.CommandContext(ctx, "kubectl", "rollout", "status", "statefulset.apps/kibishii-deployment",
 		"-n", namespace, "-w", "--timeout=30m")
-	kibishiiSetWaitCmd.Stdout = os.Stdout
-	kibishiiSetWaitCmd.Stderr = os.Stderr
-	_, _, err = veleroexec.RunCommand(kibishiiSetWaitCmd)
-
+	_, stderr, err = veleroexec.RunCommand(kibishiiSetWaitCmd)
 	if err != nil {
-		return err
+		return errors.Wrapf(err, "failed to rollout, stderr=%s", stderr)
 	}

 	fmt.Printf("Waiting for kibishii jump-pad pod to be ready\n")
 	jumpPadWaitCmd := exec.CommandContext(ctx, "kubectl", "wait", "--for=condition=ready", "-n", namespace, "pod/jump-pad")
-	jumpPadWaitCmd.Stdout = os.Stdout
-	jumpPadWaitCmd.Stderr = os.Stderr
-	_, _, err = veleroexec.RunCommand(jumpPadWaitCmd)
+	_, stderr, err = veleroexec.RunCommand(jumpPadWaitCmd)
 	if err != nil {
-		return errors.Wrapf(err, "Failed to wait for ready status of pod %s/%s", namespace, jumpPadPod)
+		return errors.Wrapf(err, "Failed to wait for ready status of pod %s/%s, stderr=%s", namespace, jumpPadPod, stderr)
 	}

 	return err
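The hunks in this file stop discarding command output and start including stderr in the wrapped errors. veleroexec.RunCommand's implementation is not part of this diff; a minimal sketch of a helper with the same (stdout, stderr, error) shape the callers rely on, assuming it simply buffers both streams:

package veleroexec

import (
	"bytes"
	"os/exec"
)

// RunCommand runs cmd and returns its captured stdout and stderr, buffering
// each stream separately so callers can include stderr in wrapped errors.
// This is a sketch of the assumed shape, not Velero's actual implementation.
func RunCommand(cmd *exec.Cmd) (string, string, error) {
	var stdoutBuf, stderrBuf bytes.Buffer
	cmd.Stdout = &stdoutBuf
	cmd.Stderr = &stderrBuf
	err := cmd.Run()
	return stdoutBuf.String(), stderrBuf.String(), err
}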
@@ -75,9 +68,9 @@ func generateData(ctx context.Context, namespace string, levels int, filesPerLevel
 		strconv.Itoa(blockSize), strconv.Itoa(passNum), strconv.Itoa(expectedNodes))
 	fmt.Printf("kibishiiGenerateCmd cmd =%v\n", kibishiiGenerateCmd)

-	_, _, err := veleroexec.RunCommand(kibishiiGenerateCmd)
+	_, stderr, err := veleroexec.RunCommand(kibishiiGenerateCmd)
 	if err != nil {
-		return errors.Wrap(err, "failed to generate")
+		return errors.Wrapf(err, "failed to generate, stderr=%s", stderr)
 	}

 	return nil
@@ -90,9 +83,9 @@ func verifyData(ctx context.Context, namespace string, levels int, filesPerLevel
 		strconv.Itoa(blockSize), strconv.Itoa(passNum), strconv.Itoa(expectedNodes))
 	fmt.Printf("kibishiiVerifyCmd cmd =%v\n", kibishiiVerifyCmd)

-	_, _, err := veleroexec.RunCommand(kibishiiVerifyCmd)
+	_, stderr, err := veleroexec.RunCommand(kibishiiVerifyCmd)
 	if err != nil {
-		return errors.Wrap(err, "failed to verify")
+		return errors.Wrapf(err, "failed to verify, stderr=%s", stderr)
 	}
 	return nil
 }
@@ -426,7 +426,7 @@ func veleroAddPluginsForProvider(ctx context.Context, veleroCLI string, veleroNamespace

 		installPluginCmd := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "plugin", "add", plugin)
 		installPluginCmd.Stdout = stdoutBuf
-		installPluginCmd.Stderr = stdoutBuf
+		installPluginCmd.Stderr = stderrBuf

 		err := installPluginCmd.Run()

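This one-line fix is easier to see with its intent restored. A minimal sketch of the corrected pattern, assuming a hypothetical standalone helper addPlugin (the real code sits inside veleroAddPluginsForProvider, whose surrounding loop and buffer declarations this hunk doesn't show), including the "Duplicate value" tolerance described in the commit message:

package e2e

import (
	"bytes"
	"context"
	"os/exec"
	"strings"

	"github.com/pkg/errors"
)

// addPlugin is a hypothetical helper illustrating the fixed wiring.
func addPlugin(ctx context.Context, veleroCLI, veleroNamespace, plugin string) error {
	stdoutBuf := new(bytes.Buffer)
	stderrBuf := new(bytes.Buffer)

	installPluginCmd := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "plugin", "add", plugin)
	installPluginCmd.Stdout = stdoutBuf
	installPluginCmd.Stderr = stderrBuf // the fix: previously this was stdoutBuf

	if err := installPluginCmd.Run(); err != nil {
		// Re-adding an already-registered plugin reports "Duplicate value" on
		// stderr and should be tolerated. With both streams aliased to stdoutBuf,
		// stderrBuf.String() was always empty and this check never matched.
		if !strings.Contains(stderrBuf.String(), "Duplicate value") {
			return errors.Wrapf(err, "failed to add plugin %s, stderr=%s", plugin, stderrBuf.String())
		}
	}
	return nil
}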