Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-10 15:07:29 +00:00)

Compare commits: v1.16.1-rc...v1.11.0 (12 commits)
0da2baa908
8628388445
495063b4f6
87794d4615
c3e7fd7a74
5c0c378797
7d0d56e5fa
3c9570fd14
971396110f
9de61aa5a0
5f3cb25311
e16cb76892
2 .github/workflows/crds-verify-kind.yaml vendored
@@ -14,7 +14,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.19
+        go-version: 1.19.8
      id: go
    # Look for a CLI that's made for this PR
    - name: Fetch built CLI
4 .github/workflows/e2e-test-kind.yaml vendored
@@ -14,7 +14,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.19
+        go-version: 1.19.8
      id: go
    # Look for a CLI that's made for this PR
    - name: Fetch built CLI
@@ -72,7 +72,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.19
+        go-version: 1.19.8
      id: go
    - name: Check out the code
      uses: actions/checkout@v2
2 .github/workflows/pr-ci-check.yml vendored
@@ -10,7 +10,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.19
+        go-version: 1.19.8
      id: go
    - name: Check out the code
      uses: actions/checkout@v2
2 .github/workflows/push.yml vendored
@@ -18,7 +18,7 @@ jobs:
     - name: Set up Go
       uses: actions/setup-go@v2
       with:
-        go-version: 1.19
+        go-version: 1.19.8
      id: go

    - uses: actions/checkout@v3
@@ -54,3 +54,10 @@ release:
   name: velero
   draft: true
   prerelease: auto
+
+git:
+  # What should be used to sort tags when gathering the current and previous
+  # tags if there are more than one tag in the same commit.
+  #
+  # Default: `-version:refname`
+  tag_sort: -version:creatordate
@@ -13,7 +13,7 @@
 # limitations under the License.

 # Velero binary build section
-FROM --platform=$BUILDPLATFORM golang:1.19-bullseye as velero-builder
+FROM --platform=$BUILDPLATFORM golang:1.19.8-bullseye as velero-builder

 ARG GOPROXY
 ARG BIN
@@ -44,7 +44,7 @@ RUN mkdir -p /output/usr/bin && \
     -ldflags "${LDFLAGS}" ${PKG}/cmd/${BIN}

 # Restic binary build section
-FROM --platform=$BUILDPLATFORM golang:1.19-bullseye as restic-builder
+FROM --platform=$BUILDPLATFORM golang:1.19.8-bullseye as restic-builder

 ARG BIN
 ARG TARGETOS
@@ -66,7 +66,7 @@ RUN mkdir -p /output/usr/bin && \
     /go/src/github.com/vmware-tanzu/velero/hack/build-restic.sh

 # Velero image packing section
-FROM gcr.io/distroless/base-nossl-debian11:nonroot
+FROM gcr.io/distroless/base-nossl-debian11@sha256:9523ef8cf054e23a81e722d231c6f604ab43a03c5b174b5c8386c78c0b6473d0

 LABEL maintainer="Nolan Brubaker <brubakern@vmware.com>"
2 Tiltfile
@@ -50,7 +50,7 @@ git_sha = str(local("git rev-parse HEAD", quiet = True, echo_off = True)).strip(

 tilt_helper_dockerfile_header = """
 # Tilt image
-FROM golang:1.19 as tilt-helper
+FROM golang:1.19.8 as tilt-helper

 # Support live reloading with Tilt
 RUN wget --output-document /restart.sh --quiet https://raw.githubusercontent.com/windmilleng/rerun-process-wrapper/master/restart.sh && \
@@ -29,17 +29,23 @@ The Progress() and Cancel() methods are needed to facilitate long-running Restor
 This is intended as a replacement for the previously-approved Upload Progress Monitoring design ([Upload Progress Monitoring](https://github.com/vmware-tanzu/velero/blob/main/design/upload-progress.md)) to expand the supported use cases beyond snapshot upload to include what was previously called Async Backup/Restore Item Actions.

 #### Flexible resource policy that can filter volumes to skip in the backup
-This feature provides a flexible policy to filter volumes out of the backup without patching any labels or annotations onto the pods or volumes. The policy is configured as a k8s ConfigMap, is maintained by the users themselves, and can be extended to more scenarios in the future. For now, the policy rules out volumes from the backup depending on the CSI driver, NFS settings, volume size, and StorageClass settings. Please refer to [policy API design](https://github.com/vmware-tanzu/velero/blob/main/design/Implemented/handle-backup-of-volumes-by-resources-filters.md#api-design) for the policy's ConfigMap format. It is not guaranteed to work with unofficial third-party plugins, as they may not follow Velero's existing backup workflow code logic.
+This feature provides a flexible policy to filter volumes out of the backup without patching any labels or annotations onto the pods or volumes. The policy is configured as a k8s ConfigMap, is maintained by the users themselves, and can be extended to more scenarios in the future. For now, the policy rules out volumes from the backup depending on the CSI driver, NFS settings, volume size, and StorageClass settings. Please refer to [Resource policies rules](https://velero.io/docs/v1.11/resource-filtering/#resource-policies) for the policy's ConfigMap format. It is not guaranteed to work with unofficial third-party plugins, as they may not follow Velero's existing backup workflow code logic.

 #### Resource Filters that can distinguish cluster scope and namespace scope resources
 This feature adds four new resource filters for backup. The new filters are separated into cluster scope and namespace scope. Before this feature, Velero could not filter cluster scope resources precisely. This feature provides that ability and refactors the existing resource filter parameters.

+#### New parameter in installation to customize the serviceaccount name
+The `velero install` sub-command now includes a new parameter, `--service-account-name`, which allows users to specify the ServiceAccountName for the Velero and node-agent pods. This feature may be particularly useful for users who utilize IRSA (IAM Roles for Service Accounts) in Amazon EKS (Elastic Kubernetes Service).
+
+#### Add a parameter for setting the Velero server connection with the k8s API server's timeout
+In Velero, some code paths need to communicate with the k8s API server. Before v1.11, these code paths used hard-coded timeout settings. This feature adds a resource-timeout parameter to the velero server binary to make it configurable.
+
 #### Add resource list in the output of the restore describe command
 Before this feature, Velero restore didn't provide a list of restored resources the way Velero backup does, so it was not convenient for users to learn what was restored. This feature adds the resource list and the handling result for each resource (created, updated, failed, or skipped).

 #### Support JSON format output of backup describe command
 Before the Velero v1.11 release, users could not choose the output format of Velero's backup describe command. The default output is friendly for human reading, but it is not structured, so it is not easy for other programs to extract information from it. Velero v1.11 adds a JSON format output for the backup describe command.

 #### Refactor controllers with controller-runtime
 In v1.11, the Backup controller and Restore controller are refactored with controller-runtime. As of v1.11, all Velero controllers use the controller-runtime framework.

@@ -59,6 +65,7 @@ To fix CVEs and keep pace with Golang, Velero made changes as follows:


 ### All Changes
+* Ignore not found error during patching managedFields (#6110, @ywk253100)
 * Modify new scope resource filters name. (#6089, @blackpiglet)
 * Make Velero not exits when EnableCSI is on and CSI snapshot not installed (#6062, @blackpiglet)
 * Restore Services before Clusters (#6057, @ywk253100)
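As context for the resource-policy entry above, here is a minimal sketch of wiring up such a filter, assuming the v1.11 volumePolicies schema from the linked docs; the ConfigMap name and the conditions are illustrative, not taken from this diff:

cat <<'EOF' > policy.yaml
version: v1
volumePolicies:
  # Skip volumes up to 1Gi that come from the listed StorageClass.
  - conditions:
      capacity: "0,1Gi"
      storageClass:
        - local-path
    action:
      type: skip
EOF
kubectl create configmap skip-small-volumes -n velero --from-file=policy.yaml
velero backup create backup-1 --resource-policies-configmap skip-small-volumes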
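The serviceaccount, timeout, and JSON-output entries above likewise surface as CLI flags. A hedged sketch of their invocation; only `--service-account-name` and the resource-timeout name are stated in the changelog itself, while the values and the `-o json` spelling are assumptions:

# Install with a pre-created ServiceAccount, e.g. one bound to an IRSA role
# (name illustrative; other install flags omitted):
velero install --service-account-name velero-irsa

# Server-side API-server timeout added in v1.11 (value illustrative):
velero server --resource-timeout=10m

# Structured output for backup describe (flag spelling assumed to follow Velero's usual -o convention):
velero backup describe backup-1 -o json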
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-FROM --platform=linux/amd64 golang:1.19-bullseye
+FROM --platform=linux/amd64 golang:1.19.8-bullseye

 ARG GOPROXY

@@ -50,7 +50,7 @@ RUN wget --quiet https://github.com/protocolbuffers/protobuf/releases/download/v
 RUN go install github.com/golang/protobuf/protoc-gen-go@v1.4.3

 # get goreleaser
-RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v1.12.3/goreleaser_Linux_x86_64.tar.gz && \
+RUN wget --quiet https://github.com/goreleaser/goreleaser/releases/download/v1.15.2/goreleaser_Linux_x86_64.tar.gz && \
     tar xvf goreleaser_Linux_x86_64.tar.gz && \
     mv goreleaser /usr/bin/goreleaser && \
     chmod +x /usr/bin/goreleaser
@@ -48,12 +48,10 @@ if [[ "${PUBLISH:-}" != "TRUE" ]]; then
   goreleaser release \
     --clean \
     --release-notes="${RELEASE_NOTES_FILE}" \
-    --skip-publish \
-    --config goreleaser.yaml
+    --skip-publish
 else
   echo "Getting ready to publish"
   goreleaser release \
     --clean \
     --release-notes="${RELEASE_NOTES_FILE}"
-    --config goreleaser.yaml
 fi
@@ -1,6 +0,0 @@
-git:
-  # What should be used to sort tags when gathering the current and previous
-  # tags if there are more than one tag in the same commit.
-  #
-  # Default: `-version:refname`
-  tag_sort: -version:creatordate
@@ -1514,10 +1514,13 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 		if patchBytes != nil {
 			if _, err = resourceClient.Patch(name, patchBytes); err != nil {
 				ctx.log.Errorf("error patch for managed fields %s: %v", kube.NamespaceAndName(obj), err)
-				errs.Add(namespace, err)
-				return warnings, errs, itemExists
+				if !apierrors.IsNotFound(err) {
+					errs.Add(namespace, err)
+					return warnings, errs, itemExists
+				}
+			} else {
+				ctx.log.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj))
 			}
-			ctx.log.Infof("the managed fields for %s is patched", kube.NamespaceAndName(obj))
 		}

 		if groupResource == kuberesource.Pods {
@@ -87,7 +87,7 @@ func (p *PVCSelectedNodeChanging) CreateResources() error {
 		p.oldNodeName = nodeName
 		fmt.Printf("Create PVC on node %s\n", p.oldNodeName)
 		pvcAnn := map[string]string{p.ann: nodeName}
-		_, err := CreatePodWithPVC(p.Client, p.namespace, p.podName, "default", p.pvcName, []string{p.volume}, pvcAnn)
+		_, err := CreatePod(p.Client, p.namespace, p.podName, "default", p.pvcName, []string{p.volume}, pvcAnn, nil)
 		Expect(err).To(Succeed())
 		err = WaitForPods(context.Background(), p.Client, p.namespace, []string{p.podName})
 		Expect(err).To(Succeed())
@@ -85,7 +85,7 @@ func (s *StorageClasssChanging) CreateResources() error {
 	})

 	By(fmt.Sprintf("Create pod %s in namespace %s", s.podName, s.namespace), func() {
-		_, err := CreatePodWithPVC(s.Client, s.namespace, s.podName, s.srcStorageClass, "", []string{s.volume}, nil)
+		_, err := CreatePod(s.Client, s.namespace, s.podName, s.srcStorageClass, "", []string{s.volume}, nil, nil)
 		Expect(err).To(Succeed())
 	})
 	By(fmt.Sprintf("Create ConfigMap %s in namespace %s", s.configmaptName, s.VeleroCfg.VeleroNamespace), func() {
@@ -117,6 +117,7 @@ var _ = Describe("[Backups][BackupsSync] Backups in object storage are synced to

 var _ = Describe("[Schedule][BR][Pause][LongTime] Backup will be created periodly by schedule defined by a Cron expression", ScheduleBackupTest)
 var _ = Describe("[Schedule][OrederedResources] Backup resources should follow the specific order in schedule", ScheduleOrderedResources)
+var _ = Describe("[Schedule][BackupCreation] Schedule controller wouldn't create a new backup when it still has pending or InProgress backup", ScheduleBackupCreationTest)

 var _ = Describe("[PrivilegesMgmt][SSR] Velero test on ssr object when controller namespace mix-ups", SSRTest)
@@ -97,7 +97,7 @@ func (p *PVBackupFiltering) CreateResources() error {
 			podName := fmt.Sprintf("pod-%d", i)
 			pods = append(pods, podName)
 			By(fmt.Sprintf("Create pod %s in namespace %s", podName, ns), func() {
-				pod, err := CreatePodWithPVC(p.Client, ns, podName, "e2e-storage-class", "", volumes, nil)
+				pod, err := CreatePod(p.Client, ns, podName, "e2e-storage-class", "", volumes, nil, nil)
 				Expect(err).To(Succeed())
 				ann := map[string]string{
 					p.annotation: volumesToAnnotation,
138 test/e2e/schedule/schedule-backup-creation.go Normal file
@@ -0,0 +1,138 @@
+package schedule
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+	"strings"
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	. "github.com/vmware-tanzu/velero/test/e2e"
+	. "github.com/vmware-tanzu/velero/test/e2e/test"
+	. "github.com/vmware-tanzu/velero/test/e2e/util/k8s"
+	. "github.com/vmware-tanzu/velero/test/e2e/util/velero"
+)
+
+type ScheduleBackupCreation struct {
+	TestCase
+	namespace        string
+	ScheduleName     string
+	ScheduleArgs     []string
+	Period           int // Limitation: the unit is minutes only, and it must divide 60 evenly
+	randBackupName   string
+	verifyTimes      int
+	volume           string
+	podName          string
+	pvcName          string
+	podAnn           map[string]string
+	podSleepDuration time.Duration
+}
+
+var ScheduleBackupCreationTest func() = TestFunc(&ScheduleBackupCreation{namespace: "sch1", TestCase: TestCase{NSBaseName: "schedule-backup-creation-test", UseVolumeSnapshots: false}})
+
+func (n *ScheduleBackupCreation) Init() error {
+	n.VeleroCfg = VeleroCfg
+	n.Client = *n.VeleroCfg.ClientToInstallVelero
+	n.Period = 3      // Unit is minute
+	n.verifyTimes = 5 // The more times we verify, the more confidence we gain
+	podSleepDurationStr := "300s"
+	n.podSleepDuration, _ = time.ParseDuration(podSleepDurationStr)
+	n.TestMsg = &TestMSG{
+		Desc:      "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
+		FailedMSG: "Failed to verify schedule backup creation behavior",
+		Text:      "Schedule controller wouldn't create a new backup when it still has pending or InProgress backup",
+	}
+	// Set the pod fields before they are referenced by the pre-hook annotations below.
+	n.volume = "volume-1"
+	n.podName = "pod-1"
+	n.pvcName = "pvc-1"
+	n.podAnn = map[string]string{
+		"pre.hook.backup.velero.io/container": n.podName,
+		"pre.hook.backup.velero.io/command":   "[\"sleep\", \"" + podSleepDurationStr + "\"]",
+		"pre.hook.backup.velero.io/timeout":   "600s",
+	}
+	return nil
+}
+
+func (n *ScheduleBackupCreation) StartRun() error {
+	n.namespace = fmt.Sprintf("%s-%s", n.NSBaseName, "ns")
+	n.ScheduleName = n.ScheduleName + "schedule-" + UUIDgen.String()
+	n.RestoreName = n.RestoreName + "restore-ns-mapping-" + UUIDgen.String()
+
+	n.ScheduleArgs = []string{
+		"--include-namespaces", n.namespace,
+		"--schedule=*/" + fmt.Sprintf("%v", n.Period) + " * * * *",
+		"--default-volumes-to-fs-backup",
+	}
+	Expect(n.Period < 30).To(Equal(true))
+	return nil
+}
+
+func (p *ScheduleBackupCreation) CreateResources() error {
+	p.Ctx, _ = context.WithTimeout(context.Background(), 60*time.Minute)
+	By(fmt.Sprintf("Create namespace %s", p.namespace), func() {
+		Expect(CreateNamespace(context.Background(), p.Client, p.namespace)).To(Succeed(),
+			fmt.Sprintf("Failed to create namespace %s", p.namespace))
+	})
+
+	By(fmt.Sprintf("Create pod %s in namespace %s", p.podName, p.namespace), func() {
+		_, err := CreatePod(p.Client, p.namespace, p.podName, "default", p.pvcName, []string{p.volume}, nil, p.podAnn)
+		Expect(err).To(Succeed())
+		err = WaitForPods(context.Background(), p.Client, p.namespace, []string{p.podName})
+		Expect(err).To(Succeed())
+	})
+	return nil
+}
+
+func (n *ScheduleBackupCreation) Backup() error {
+	// Wait until the beginning of the given period to create the schedule; this gives us
+	// a predictable window to wait for the first scheduled backup, and to verify that no
+	// extra scheduled backup is created between schedule creation and the first trigger.
+	By(fmt.Sprintf("Creating schedule %s ......\n", n.ScheduleName), func() {
+		for i := 0; i < n.Period*60/30; i++ {
+			time.Sleep(30 * time.Second)
+			now := time.Now().Minute()
+			triggerNow := now % n.Period
+			if triggerNow == 0 {
+				Expect(VeleroScheduleCreate(n.Ctx, VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, n.ScheduleName, n.ScheduleArgs)).To(Succeed(), func() string {
+					RunDebug(context.Background(), VeleroCfg.VeleroCLI, VeleroCfg.VeleroNamespace, "", "")
+					return "Fail to create schedule"
+				})
+				break
+			}
+		}
+	})
+
+	By("Delay one more minute to make sure the new backup was created in the given period", func() {
+		time.Sleep(1 * time.Minute)
+	})
+
+	By(fmt.Sprintf("Get backups every %d minutes; the backup count should increase by one each period\n", n.Period), func() {
+		for i := 1; i <= n.verifyTimes; i++ {
+			fmt.Printf("Start to sleep %v, iteration #%d...\n", n.podSleepDuration, i)
+			mi, _ := time.ParseDuration("60s")
+			time.Sleep(n.podSleepDuration + mi)
+			bMap := make(map[string]string)
+			backupsInfo, err := GetScheduledBackupsCreationTime(context.Background(), VeleroCfg.VeleroCLI, "default", n.ScheduleName)
+			Expect(err).To(Succeed())
+			Expect(len(backupsInfo) == i).To(Equal(true))
+			for index, bi := range backupsInfo {
+				bList := strings.Split(bi, ",")
+				fmt.Printf("Backup %d: %v\n", index, bList)
+				bMap[bList[0]] = bList[1]
+				_, err := time.Parse("2006-01-02 15:04:05 -0700 MST", bList[1])
+				Expect(err).To(Succeed())
+			}
+			if i == n.verifyTimes-1 {
+				backupInfo := backupsInfo[rand.Intn(len(backupsInfo))]
+				n.randBackupName = strings.Split(backupInfo, ",")[0]
+			}
+		}
+	})
+	return nil
+}
+
+func (n *ScheduleBackupCreation) Restore() error {
+	return nil
+}
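The schedule the new test drives reduces to one CLI call. A minimal sketch of the equivalent manual invocation, with illustrative names (the flags are taken from ScheduleArgs above):

velero schedule create demo-schedule \
    --include-namespaces schedule-backup-creation-test-ns \
    --schedule="*/3 * * * *" \
    --default-volumes-to-fs-backup

Because the pod's pre-backup hook sleeps for 300s while the schedule fires every 3 minutes, each backup is still InProgress at the next trigger, which is exactly the condition under which the test expects the schedule controller to skip creating a new backup.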
@@ -31,7 +31,7 @@ func (n *ScheduleBackup) Init() error {
 	n.VeleroCfg = VeleroCfg
 	n.Client = *n.VeleroCfg.ClientToInstallVelero
 	n.Period = 3 // Unit is minute
-	n.verifyTimes = 5 // More verify times more confidence
+	n.verifyTimes = 5 // The more times we verify, the more confidence we gain
 	n.TestMsg = &TestMSG{
 		Desc:      "Set up a scheduled backup defined by a Cron expression",
 		FailedMSG: "Failed to schedule a backup",
@@ -25,7 +25,6 @@ import (
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	corev1 "k8s.io/api/core/v1"
-	corev1api "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"

@@ -64,12 +63,12 @@ func WaitForPods(ctx context.Context, client TestClient, namespace string, pods
 		checkPod, err := client.ClientGo.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
 		if err != nil {
 			// Should ignore "etcdserver: request timed out" kind of errors, try to get pod status again before timeout.
-			fmt.Println(errors.Wrap(err, fmt.Sprintf("Failed to verify pod %s/%s is %s, try again...\n", namespace, podName, corev1api.PodRunning)))
+			fmt.Println(errors.Wrap(err, fmt.Sprintf("Failed to verify pod %s/%s is %s, try again...\n", namespace, podName, corev1.PodRunning)))
 			return false, nil
 		}
 		// If any pod is still waiting we don't need to check any more so return and wait for next poll interval
-		if checkPod.Status.Phase != corev1api.PodRunning {
-			fmt.Printf("Pod %s is in state %s waiting for it to be %s\n", podName, checkPod.Status.Phase, corev1api.PodRunning)
+		if checkPod.Status.Phase != corev1.PodRunning {
+			fmt.Printf("Pod %s is in state %s waiting for it to be %s\n", podName, checkPod.Status.Phase, corev1.PodRunning)
 			return false, nil
 		}
 	}
@@ -255,35 +254,6 @@ func GetPVByPodName(client TestClient, namespace, podName string) (string, error
 	}
 	return pv_value.Name, nil
 }
-func CreatePodWithPVC(client TestClient, ns, podName, sc, pvcName string, volumeNameList []string, pvcAnn map[string]string) (*corev1.Pod, error) {
-	volumes := []corev1.Volume{}
-	for _, volume := range volumeNameList {
-		var _pvcName string
-		if pvcName == "" {
-			_pvcName = fmt.Sprintf("pvc-%s", volume)
-		} else {
-			_pvcName = pvcName
-		}
-		pvc, err := CreatePVC(client, ns, _pvcName, sc, pvcAnn)
-		if err != nil {
-			return nil, err
-		}
-		volumes = append(volumes, corev1.Volume{
-			Name: volume,
-			VolumeSource: corev1.VolumeSource{
-				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
-					ClaimName: pvc.Name,
-					ReadOnly:  false,
-				},
-			},
-		})
-	}
-	pod, err := CreatePod(client, ns, podName, volumes)
-	if err != nil {
-		return nil, err
-	}
-	return pod, nil
-}

 func CreateFileToPod(ctx context.Context, namespace, podName, volume, filename, content string) error {
 	arg := []string{"exec", "-n", namespace, "-c", podName, podName,
@@ -26,7 +26,34 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

-func CreatePod(client TestClient, ns, name string, volumes []corev1.Volume) (*corev1.Pod, error) {
+func CreatePod(client TestClient, ns, name, sc, pvcName string, volumeNameList []string, pvcAnn, ann map[string]string) (*corev1.Pod, error) {
+	if pvcName != "" && len(volumeNameList) != 1 {
+		return nil, errors.New("Volume name list should contain only 1 since PVC name is not empty")
+	}
+	volumes := []corev1.Volume{}
+	for _, volume := range volumeNameList {
+		var _pvcName string
+		if pvcName == "" {
+			_pvcName = fmt.Sprintf("pvc-%s", volume)
+		} else {
+			_pvcName = pvcName
+		}
+		pvc, err := CreatePVC(client, ns, _pvcName, sc, pvcAnn)
+		if err != nil {
+			return nil, err
+		}
+
+		volumes = append(volumes, corev1.Volume{
+			Name: volume,
+			VolumeSource: corev1.VolumeSource{
+				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+					ClaimName: pvc.Name,
+					ReadOnly:  false,
+				},
+			},
+		})
+	}
+
 	vmList := []corev1.VolumeMount{}
 	for _, v := range volumes {
 		vmList = append(vmList, corev1.VolumeMount{
@@ -34,9 +61,11 @@ func CreatePod(client TestClient, ns, name string, volumes []corev1.Volume) (*co
 			MountPath: "/" + v.Name,
 		})
 	}
+
 	p := &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
+			Name:        name,
+			Annotations: ann,
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{
@@ -38,7 +38,7 @@ func CreatePVC(client TestClient, ns, name, sc string, ann map[string]string) (*
 		},
 		Resources: corev1.ResourceRequirements{
 			Requests: corev1.ResourceList{
-				corev1.ResourceStorage: resource.MustParse("1Gi"),
+				corev1.ResourceStorage: resource.MustParse("1Mi"),
 			},
 		},
 		StorageClassName: &sc,