Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-11 23:32:53 +00:00)

Compare commits (58 commits)
| SHA1 |
|---|
| 82a100981c |
| 798310015b |
| 14a1be8416 |
| beaaa3aca2 |
| a3f32f942f |
| 11dbf8c974 |
| efd8eb3e3c |
| 6f5d9b030a |
| b7ee7e4e1c |
| 962f543470 |
| 6ad78a1d1e |
| f911e13242 |
| 515eff5330 |
| 1aa5004606 |
| 0ae1f9c565 |
| 70a03ed27f |
| 5d84a27300 |
| b0945d7740 |
| 727c633226 |
| 2b9a799e84 |
| 313f836d23 |
| 3de8be83f4 |
| 9937607e72 |
| bfbefee0f5 |
| 61b247419c |
| 6fe8d4b65f |
| e4c84b7b3d |
| dc45cd141c |
| af6912286b |
| 54eaa57ada |
| 2d88c9a436 |
| 7a749b8cf7 |
| 5f86cfae15 |
| 8487585732 |
| 5838e35e2e |
| 56eb492acb |
| 5ac7d52cac |
| b7073fb2bf |
| ac58c7508b |
| 18375cf1a9 |
| b870847375 |
| 4d20c5a112 |
| e76b697b45 |
| b5c14d90bb |
| a6fb4bb65a |
| 1996ee3be0 |
| 6021f148c4 |
| 1ed7481c90 |
| ce9ac0d8b0 |
| 5dbc98e679 |
| 8c2a75eea5 |
| e9e2b66b5f |
| ef890f2a5e |
| 6418fda2e4 |
| 43c70b4691 |
| 2021e4fa58 |
| 162cf6e99b |
| 881e562ab1 |
@@ -14,7 +14,7 @@
dist: _output
builds:
- main: ./cmd/velero/main.go
- main: ./cmd/velero/velero.go
env:
- CGO_ENABLED=0
goos:
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM --platform=$BUILDPLATFORM golang:1.17.11 as builder-env
FROM --platform=$BUILDPLATFORM golang:1.17.13 as builder-env

ARG GOPROXY
ARG PKG
@@ -50,7 +50,7 @@ RUN mkdir -p /output/usr/bin && \
go build -o /output/${BIN} \
-ldflags "${LDFLAGS}" ${PKG}/cmd/${BIN}

FROM gcr.io/distroless/base-debian11@sha256:e672eb713e56feb13e349773973b81b1b9284f70b15cf18d1a09ad31a03abe59
FROM gcr.io/distroless/base-debian11@sha256:49d2923f35d66b8402487a7c01bc62a66d8279cd42e89c11b64cdce8d5826c03

LABEL maintainer="Nolan Brubaker <brubakern@vmware.com>"
@@ -1,3 +1,54 @@
## v1.9.2
### 2022-09-14

### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.9.2

### Container Image
`velero/velero:v1.9.2`

### Documentation
https://velero.io/docs/v1.9/

### Upgrading
https://velero.io/docs/v1.9/upgrade-to-1.9/

### All changes

* Fix CVE-2022-1962 by bumping up golang version to 1.17.13 (#5286, @qiuming-best)
* Fix code spell check fail (#5300, @qiuming-best)
* Fix nil pointer panic when restoring StatefulSets (#5301, @divolgin)
* Check for empty ns list before checking nslist[0] (#5302, @sseago)
* check vsc null pointer (#5303, @lilongfeng0902)
* Fix edge cases for already exists resources (#5304, @shubham-pampattiwar)
* Increase ensure restic repository timeout to 5m (#5336, @shubham-pampattiwar)
* Added DownloadTargetKindCSIBackupVolumeSnapshots for retrieving the signed URL to download only the `<backup name>`-csi-volumesnapshots.json.gz and DownloadTargetKindCSIBackupVolumeSnapshotContents to download only `<backup name>`-csi-volumesnapshotcontents.json.gz in the DownloadRequest CR structure. These files are already present in the backup layout. (#5307, @anshulahuja98)

## v1.9.1
### 2022-08-03

### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.9.1

### Container Image
`velero/velero:v1.9.1`

### Documentation
https://velero.io/docs/v1.9/

### Upgrading
https://velero.io/docs/v1.9/upgrade-to-1.9/

### All changes

* Fix bsl validation bug: the BSL is validated continually and doesn't respect the validation period configured (#5112, @ywk253100)
* Modify BackupStoreGetter to avoid BSL spec changes (#5134, @sseago)
* Delay CA file deletion in PVB controller. (#5150, @jxun)
* Skip registering "crd-remap-version" plugin when feature flag "EnableAPIGroupVersions" is set (#5173, @reasonerjt)
* Fix restic backups to multiple backup storage locations bug (#5175, @qiuming-best)
* Make CSI snapshot creation timeout configurable. (#5189, @jxun)
* Add annotation "pv.kubernetes.io/migrated-to" for CSI checking. (#5186, @jxun)
* Bump up base image and package version to fix CVEs. (#5202, @ywk253100)

## v1.9.0
### 2022-06-13

@@ -102,3 +153,4 @@ With bumping up the API to v1 in CSI plugin, the v0.3.0 CSI plugin will only wor
* Fix E2E test [Backups][Deletion][Restic] on GCP. (#4968, @jxun)
* Disable status as sub resource in CRDs (#4972, @ywk253100)
* Add more information for failing to get path or snapshot in restic backup and restore. (#4988, @jxun)
* When spec.RestoreStatus is empty, don't restore status (#5015, @sseago)
@@ -37,6 +37,11 @@ spec:
spec:
description: BackupSpec defines the specification for a Velero backup.
properties:
csiSnapshotTimeout:
description: CSISnapshotTimeout specifies the time used to wait for
CSI VolumeSnapshot status turns to ReadyToUse during creation, before
returning error as timeout. The default value is 10 minute.
type: string
defaultVolumesToRestic:
description: DefaultVolumesToRestic specifies whether restic should
be used to take a backup of all pod volumes by default.
@@ -50,6 +50,8 @@ spec:
- BackupResourceList
- RestoreLog
- RestoreResults
- CSIBackupVolumeSnapshots
- CSIBackupVolumeSnapshotContents
type: string
name:
description: Name is the name of the kubernetes resource with
@@ -61,6 +61,11 @@ spec:
description: Template is the definition of the Backup to be run on
the provided schedule
properties:
csiSnapshotTimeout:
description: CSISnapshotTimeout specifies the time used to wait
for CSI VolumeSnapshot status turns to ReadyToUse during creation,
before returning error as timeout. The default value is 10 minute.
type: string
defaultVolumesToRestic:
description: DefaultVolumesToRestic specifies whether restic should
be used to take a backup of all pod volumes by default.
File diff suppressed because one or more lines are too long
@@ -505,6 +505,8 @@ spec:
- BackupResourceList
- RestoreLog
- RestoreResults
- CSIBackupVolumeSnapshots
- CSIBackupVolumeSnapshotContents
type: string
name:
description: Name is the name of the kubernetes resource with
@@ -429,7 +429,7 @@ Instead, a new method for 'Progress' will be added to interface. Velero server r

But, this involves good amount of changes and needs a way for backward compatibility.

As volume plugins are mostly K8s native, its fine to go ahead with current limiation.
As volume plugins are mostly K8s native, its fine to go ahead with current limitation.

### Update Backup CR
Instead of creating new CRs, plugins can directly update the status of Backup CR. But, this deviates from current approach of having separate CRs like PodVolumeBackup/PodVolumeRestore to know operations progress.
go.mod (16 changed lines)
@@ -27,7 +27,7 @@ require (
|
||||
github.com/onsi/ginkgo v1.16.5
|
||||
github.com/onsi/gomega v1.16.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/client_golang v1.11.0
|
||||
github.com/prometheus/client_golang v1.12.2
|
||||
github.com/robfig/cron v1.1.0
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/spf13/afero v1.6.0
|
||||
@@ -36,7 +36,7 @@ require (
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/vmware-tanzu/crash-diagnostics v0.3.7
|
||||
golang.org/x/mod v0.4.2
|
||||
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
google.golang.org/api v0.56.0
|
||||
@@ -62,7 +62,7 @@ require (
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dimchansky/utfbom v1.1.1 // indirect
|
||||
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
|
||||
@@ -78,7 +78,7 @@ require (
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect
|
||||
github.com/json-iterator/go v1.1.11 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/mattn/go-colorable v0.1.9 // indirect
|
||||
github.com/mattn/go-ieproxy v0.0.1 // indirect
|
||||
@@ -88,13 +88,13 @@ require (
|
||||
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.1 // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/nxadm/tail v1.4.8 // indirect
|
||||
github.com/oklog/run v1.0.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.26.0 // indirect
|
||||
github.com/prometheus/procfs v0.6.0 // indirect
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/stretchr/objx v0.2.0 // indirect
|
||||
github.com/vladimirvivien/gexe v0.1.1 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
@@ -103,7 +103,7 @@ require (
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
go.uber.org/zap v1.19.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 // indirect
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
|
||||
|
||||
go.sum (24 changed lines)
@@ -141,8 +141,9 @@ github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6
|
||||
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
@@ -404,8 +405,9 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
@@ -481,8 +483,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
@@ -531,8 +534,9 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34=
|
||||
github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
@@ -542,14 +546,16 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/robfig/cron v1.1.0 h1:jk4/Hud3TTdcrJgUOBgsqrZBarcxl6ADIjSC2iniwLY=
|
||||
github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
|
||||
@@ -785,8 +791,9 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 h1:ADo5wSpq2gqaCGQWzk7S5vd//0iyyLeAratkEoG5dLE=
|
||||
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -894,8 +901,9 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0=
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
|
||||
|
||||
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

FROM golang:1.17.11
FROM golang:1.17.13

ARG GOPROXY
@@ -95,7 +95,7 @@ eval $(go run $DIR/chk_version.go)
printf "To clarify, you've provided a version string of $VELERO_VERSION.\n"
printf "Based on this, the following assumptions have been made: \n"

# $VELERO_PATCH gets populated by the chk_version.go scrip that parses and verifies the given version format
# $VELERO_PATCH gets populated by the chk_version.go script that parses and verifies the given version format
# If we've got a patch release, we assume the tag is on release branch.
if [[ "$VELERO_PATCH" != 0 ]]; then
printf "*\t This is a patch release.\n"
@@ -110,6 +110,12 @@ type BackupSpec struct {
	// +optional
	// +nullable
	OrderedResources map[string]string `json:"orderedResources,omitempty"`

	// CSISnapshotTimeout specifies the time used to wait for CSI VolumeSnapshot status turns to
	// ReadyToUse during creation, before returning error as timeout.
	// The default value is 10 minute.
	// +optional
	CSISnapshotTimeout metav1.Duration `json:"csiSnapshotTimeout,omitempty"`
}

// BackupHooks contains custom behaviors that should be executed at different phases of the backup.
@@ -25,17 +25,19 @@ type DownloadRequestSpec struct {
}

// DownloadTargetKind represents what type of file to download.
// +kubebuilder:validation:Enum=BackupLog;BackupContents;BackupVolumeSnapshots;BackupItemSnapshots;BackupResourceList;RestoreLog;RestoreResults
// +kubebuilder:validation:Enum=BackupLog;BackupContents;BackupVolumeSnapshots;BackupItemSnapshots;BackupResourceList;RestoreLog;RestoreResults;CSIBackupVolumeSnapshots;CSIBackupVolumeSnapshotContents
type DownloadTargetKind string

const (
	DownloadTargetKindBackupLog DownloadTargetKind = "BackupLog"
	DownloadTargetKindBackupContents DownloadTargetKind = "BackupContents"
	DownloadTargetKindBackupVolumeSnapshots DownloadTargetKind = "BackupVolumeSnapshots"
	DownloadTargetKindBackupItemSnapshots DownloadTargetKind = "BackupItemSnapshots"
	DownloadTargetKindBackupResourceList DownloadTargetKind = "BackupResourceList"
	DownloadTargetKindRestoreLog DownloadTargetKind = "RestoreLog"
	DownloadTargetKindRestoreResults DownloadTargetKind = "RestoreResults"
	DownloadTargetKindBackupLog DownloadTargetKind = "BackupLog"
	DownloadTargetKindBackupContents DownloadTargetKind = "BackupContents"
	DownloadTargetKindBackupVolumeSnapshots DownloadTargetKind = "BackupVolumeSnapshots"
	DownloadTargetKindBackupItemSnapshots DownloadTargetKind = "BackupItemSnapshots"
	DownloadTargetKindBackupResourceList DownloadTargetKind = "BackupResourceList"
	DownloadTargetKindRestoreLog DownloadTargetKind = "RestoreLog"
	DownloadTargetKindRestoreResults DownloadTargetKind = "RestoreResults"
	DownloadTargetKindCSIBackupVolumeSnapshots DownloadTargetKind = "CSIBackupVolumeSnapshots"
	DownloadTargetKindCSIBackupVolumeSnapshotContents DownloadTargetKind = "CSIBackupVolumeSnapshotContents"
)

// DownloadTarget is the specification for what kind of file to download, and the name of the
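As a usage sketch only (not part of this diff), a client could ask the Velero server for the signed URL of the new `<backup name>`-csi-volumesnapshots.json.gz object by creating a DownloadRequest with one of the new target kinds. The "velero" namespace and the request name below are illustrative placeholders.

```go
// Sketch: requesting a signed URL for a backup's CSI VolumeSnapshot list via
// the new DownloadTargetKindCSIBackupVolumeSnapshots kind. Names are placeholders.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

func newCSISnapshotsDownloadRequest(backupName string) *velerov1api.DownloadRequest {
	return &velerov1api.DownloadRequest{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "velero",                              // assumed install namespace
			Name:      backupName + "-csi-volumesnapshots",   // illustrative request name
		},
		Spec: velerov1api.DownloadRequestSpec{
			Target: velerov1api.DownloadTarget{
				Kind: velerov1api.DownloadTargetKindCSIBackupVolumeSnapshots,
				Name: backupName,
			},
		},
	}
}
```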
@@ -250,6 +250,7 @@ func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
			(*out)[key] = val
		}
	}
	out.CSISnapshotTimeout = in.CSISnapshotTimeout
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec.
@@ -225,8 +225,11 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group

	namespacesToList := getNamespacesToList(r.backupRequest.NamespaceIncludesExcludes)

	// Check if we're backing up namespaces, and only certain ones
	if gr == kuberesource.Namespaces && namespacesToList[0] != "" {
	// Check if we're backing up namespaces for a less-than-full backup.
	// We enter this block if resource is Namespaces and the namespae list is either empty or contains
	// an explicit namespace list. (We skip this block if the list contains "" since that indicates
	// a full-cluster backup
	if gr == kuberesource.Namespaces && (len(namespacesToList) == 0 || namespacesToList[0] != "") {
		resourceClient, err := r.dynamicFactory.ClientForGroupVersionResource(gv, resource, "")
		if err != nil {
			log.WithError(err).Error("Error getting dynamic client")
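The changelog entry "Check for empty ns list before checking nslist[0]" (#5302) is exactly this guard change. A minimal stand-alone sketch of the corrected condition, with assumed example inputs rather than Velero's real `getNamespacesToList` output, follows.

```go
// Sketch of the corrected guard: enter the scoped-namespace path when the list
// is empty or names explicit namespaces; []string{""} denotes a full-cluster backup.
package example

func isScopedNamespaceBackup(namespacesToList []string) bool {
	// The old form, namespacesToList[0] != "", indexed element 0 unconditionally
	// and could panic on an empty slice.
	return len(namespacesToList) == 0 || namespacesToList[0] != ""
}

// Assumed example inputs, for illustration only:
//   isScopedNamespaceBackup([]string{""})           == false // full-cluster backup
//   isScopedNamespaceBackup([]string{"ns1", "ns2"}) == true  // explicit namespaces
//   isScopedNamespaceBackup(nil)                    == true  // empty list no longer panics
```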
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -233,3 +233,9 @@ func (b *BackupBuilder) OrderedResources(orders map[string]string) *BackupBuilde
	b.object.Spec.OrderedResources = orders
	return b
}

// CSISnapshotTimeout sets the Backup's CSISnapshotTimeout
func (b *BackupBuilder) CSISnapshotTimeout(timeout time.Duration) *BackupBuilder {
	b.object.Spec.CSISnapshotTimeout.Duration = timeout
	return b
}
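As a hedged usage sketch (assumed, not shown in this comparison), the new setter chains like the other BackupBuilder methods, so callers such as tests can raise the CSI snapshot wait above the 10-minute default; `builder.ForBackup` and `Result()` are the existing helpers in pkg/builder.

```go
// Sketch: building a Backup whose CSI VolumeSnapshot wait is raised from the
// default 10 minutes to 30 minutes. Namespace and name are placeholders.
package example

import (
	"time"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/builder"
)

func backupWithLongerCSITimeout() *velerov1api.Backup {
	return builder.ForBackup("velero", "nightly-backup").
		CSISnapshotTimeout(30 * time.Minute).
		Result()
}
```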
@@ -98,6 +98,7 @@ type CreateOptions struct {
|
||||
SnapshotLocations []string
|
||||
FromSchedule string
|
||||
OrderedResources string
|
||||
CSISnapshotTimeout time.Duration
|
||||
|
||||
client veleroclient.Interface
|
||||
}
|
||||
@@ -122,6 +123,7 @@ func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
|
||||
flags.StringSliceVar(&o.SnapshotLocations, "volume-snapshot-locations", o.SnapshotLocations, "List of locations (at most one per provider) where volume snapshots should be stored.")
|
||||
flags.VarP(&o.Selector, "selector", "l", "Only back up resources matching this label selector.")
|
||||
flags.StringVar(&o.OrderedResources, "ordered-resources", "", "Mapping Kinds to an ordered list of specific resources of that Kind. Resource names are separated by commas and their names are in format 'namespace/resourcename'. For cluster scope resource, simply use resource name. Key-value pairs in the mapping are separated by semi-colon. Example: 'pods=ns1/pod1,ns1/pod2;persistentvolumeclaims=ns1/pvc4,ns1/pvc8'. Optional.")
|
||||
flags.DurationVar(&o.CSISnapshotTimeout, "csi-snapshot-timeout", o.CSISnapshotTimeout, "How long to wait for CSI snapshot creation before timeout.")
|
||||
f := flags.VarPF(&o.SnapshotVolumes, "snapshot-volumes", "", "Take snapshots of PersistentVolumes as part of the backup.")
|
||||
// this allows the user to just specify "--snapshot-volumes" as shorthand for "--snapshot-volumes=true"
|
||||
// like a normal bool flag
|
||||
@@ -332,7 +334,8 @@ func (o *CreateOptions) BuildBackup(namespace string) (*velerov1api.Backup, erro
|
||||
LabelSelector(o.Selector.LabelSelector).
|
||||
TTL(o.TTL).
|
||||
StorageLocation(o.StorageLocation).
|
||||
VolumeSnapshotLocations(o.SnapshotLocations...)
|
||||
VolumeSnapshotLocations(o.SnapshotLocations...).
|
||||
CSISnapshotTimeout(o.CSISnapshotTimeout)
|
||||
if len(o.OrderedResources) > 0 {
|
||||
orders, err := ParseOrderedResources(o.OrderedResources)
|
||||
if err != nil {
|
||||
|
||||
@@ -19,6 +19,7 @@ package backup
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -35,6 +36,7 @@ func TestCreateOptions_BuildBackup(t *testing.T) {
|
||||
o.Labels.Set("velero.io/test=true")
|
||||
o.OrderedResources = "pods=p1,p2;persistentvolumeclaims=pvc1,pvc2"
|
||||
orders, err := ParseOrderedResources(o.OrderedResources)
|
||||
o.CSISnapshotTimeout = 20 * time.Minute
|
||||
assert.NoError(t, err)
|
||||
|
||||
backup, err := o.BuildBackup(testNamespace)
|
||||
@@ -46,6 +48,7 @@ func TestCreateOptions_BuildBackup(t *testing.T) {
|
||||
SnapshotVolumes: o.SnapshotVolumes.Value,
|
||||
IncludeClusterResources: o.IncludeClusterResources.Value,
|
||||
OrderedResources: orders,
|
||||
CSISnapshotTimeout: metav1.Duration{Duration: o.CSISnapshotTimeout},
|
||||
}, backup.Spec)
|
||||
|
||||
assert.Equal(t, map[string]string{
|
||||
|
||||
@@ -145,6 +145,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
|
||||
VolumeSnapshotLocations: o.BackupOptions.SnapshotLocations,
|
||||
DefaultVolumesToRestic: o.BackupOptions.DefaultVolumesToRestic.Value,
|
||||
OrderedResources: orders,
|
||||
CSISnapshotTimeout: metav1.Duration{Duration: o.BackupOptions.CSISnapshotTimeout},
|
||||
},
|
||||
Schedule: o.Schedule,
|
||||
UseOwnerReferencesInBackup: &o.UseOwnerReferencesInBackup,
|
||||
|
||||
@@ -19,9 +19,11 @@ package plugin
|
||||
import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
"github.com/vmware-tanzu/velero/pkg/features"
|
||||
|
||||
"github.com/vmware-tanzu/velero/pkg/backup"
|
||||
"github.com/vmware-tanzu/velero/pkg/client"
|
||||
velerodiscovery "github.com/vmware-tanzu/velero/pkg/discovery"
|
||||
@@ -36,11 +38,10 @@ func NewCommand(f client.Factory) *cobra.Command {
|
||||
Hidden: true,
|
||||
Short: "INTERNAL COMMAND ONLY - not intended to be run directly by users",
|
||||
Run: func(c *cobra.Command, args []string) {
|
||||
pluginServer.
|
||||
pluginServer = pluginServer.
|
||||
RegisterBackupItemAction("velero.io/pv", newPVBackupItemAction).
|
||||
RegisterBackupItemAction("velero.io/pod", newPodBackupItemAction).
|
||||
RegisterBackupItemAction("velero.io/service-account", newServiceAccountBackupItemAction(f)).
|
||||
RegisterBackupItemAction("velero.io/crd-remap-version", newRemapCRDVersionAction(f)).
|
||||
RegisterRestoreItemAction("velero.io/job", newJobRestoreItemAction).
|
||||
RegisterRestoreItemAction("velero.io/pod", newPodRestoreItemAction).
|
||||
RegisterRestoreItemAction("velero.io/restic", newResticRestoreItemAction(f)).
|
||||
@@ -55,13 +56,15 @@ func NewCommand(f client.Factory) *cobra.Command {
|
||||
RegisterRestoreItemAction("velero.io/crd-preserve-fields", newCRDV1PreserveUnknownFieldsItemAction).
|
||||
RegisterRestoreItemAction("velero.io/change-pvc-node-selector", newChangePVCNodeSelectorItemAction(f)).
|
||||
RegisterRestoreItemAction("velero.io/apiservice", newAPIServiceRestoreItemAction).
|
||||
RegisterRestoreItemAction("velero.io/admission-webhook-configuration", newAdmissionWebhookConfigurationAction).
|
||||
Serve()
|
||||
RegisterRestoreItemAction("velero.io/admission-webhook-configuration", newAdmissionWebhookConfigurationAction)
|
||||
if !features.IsEnabled(velerov1api.APIGroupVersionsFeatureFlag) {
|
||||
// Do not register crd-remap-version BIA if the API Group feature flag is enabled, so that the v1 CRD can be backed up
|
||||
pluginServer = pluginServer.RegisterBackupItemAction("velero.io/crd-remap-version", newRemapCRDVersionAction(f))
|
||||
}
|
||||
pluginServer.Serve()
|
||||
},
|
||||
}
|
||||
|
||||
pluginServer.BindFlags(c.Flags())
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
|
||||
@@ -102,6 +102,8 @@ const (
|
||||
// the default TTL for a backup
|
||||
defaultBackupTTL = 30 * 24 * time.Hour
|
||||
|
||||
defaultCSISnapshotTimeout = 10 * time.Minute
|
||||
|
||||
// defaultCredentialsDirectory is the path on disk where credential
|
||||
// files will be written to
|
||||
defaultCredentialsDirectory = "/tmp/credentials"
|
||||
@@ -111,7 +113,7 @@ type serverConfig struct {
|
||||
// TODO(2.0) Deprecate defaultBackupLocation
|
||||
pluginDir, metricsAddress, defaultBackupLocation string
|
||||
backupSyncPeriod, podVolumeOperationTimeout, resourceTerminatingTimeout time.Duration
|
||||
defaultBackupTTL, storeValidationFrequency time.Duration
|
||||
defaultBackupTTL, storeValidationFrequency, defaultCSISnapshotTimeout time.Duration
|
||||
restoreResourcePriorities []string
|
||||
defaultVolumeSnapshotLocations map[string]string
|
||||
restoreOnly bool
|
||||
@@ -142,6 +144,7 @@ func NewCommand(f client.Factory) *cobra.Command {
|
||||
defaultVolumeSnapshotLocations: make(map[string]string),
|
||||
backupSyncPeriod: defaultBackupSyncPeriod,
|
||||
defaultBackupTTL: defaultBackupTTL,
|
||||
defaultCSISnapshotTimeout: defaultCSISnapshotTimeout,
|
||||
storeValidationFrequency: defaultStoreValidationFrequency,
|
||||
podVolumeOperationTimeout: defaultPodVolumeOperationTimeout,
|
||||
restoreResourcePriorities: defaultRestorePriorities,
|
||||
@@ -650,6 +653,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
|
||||
s.config.defaultBackupLocation,
|
||||
s.config.defaultVolumesToRestic,
|
||||
s.config.defaultBackupTTL,
|
||||
s.config.defaultCSISnapshotTimeout,
|
||||
s.sharedInformerFactory.Velero().V1().VolumeSnapshotLocations().Lister(),
|
||||
defaultVolumeSnapshotLocations,
|
||||
s.metrics,
|
||||
|
||||
@@ -87,6 +87,7 @@ type backupController struct {
|
||||
defaultBackupLocation string
|
||||
defaultVolumesToRestic bool
|
||||
defaultBackupTTL time.Duration
|
||||
defaultCSISnapshotTimeout time.Duration
|
||||
snapshotLocationLister velerov1listers.VolumeSnapshotLocationLister
|
||||
defaultSnapshotLocations map[string]string
|
||||
metrics *metrics.ServerMetrics
|
||||
@@ -111,6 +112,7 @@ func NewBackupController(
|
||||
defaultBackupLocation string,
|
||||
defaultVolumesToRestic bool,
|
||||
defaultBackupTTL time.Duration,
|
||||
defaultCSISnapshotTimeout time.Duration,
|
||||
volumeSnapshotLocationLister velerov1listers.VolumeSnapshotLocationLister,
|
||||
defaultSnapshotLocations map[string]string,
|
||||
metrics *metrics.ServerMetrics,
|
||||
@@ -135,6 +137,7 @@ func NewBackupController(
|
||||
defaultBackupLocation: defaultBackupLocation,
|
||||
defaultVolumesToRestic: defaultVolumesToRestic,
|
||||
defaultBackupTTL: defaultBackupTTL,
|
||||
defaultCSISnapshotTimeout: defaultCSISnapshotTimeout,
|
||||
snapshotLocationLister: volumeSnapshotLocationLister,
|
||||
defaultSnapshotLocations: defaultSnapshotLocations,
|
||||
metrics: metrics,
|
||||
@@ -359,6 +362,11 @@ func (c *backupController) prepareBackupRequest(backup *velerov1api.Backup) *pkg
|
||||
request.Spec.TTL.Duration = c.defaultBackupTTL
|
||||
}
|
||||
|
||||
if request.Spec.CSISnapshotTimeout.Duration == 0 {
|
||||
// set default CSI VolumeSnapshot timeout
|
||||
request.Spec.CSISnapshotTimeout.Duration = c.defaultCSISnapshotTimeout
|
||||
}
|
||||
|
||||
// calculate expiration
|
||||
request.Status.Expiration = &metav1.Time{Time: c.clock.Now().Add(request.Spec.TTL.Duration)}
|
||||
|
||||
@@ -638,7 +646,7 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error {
|
||||
backupLog.Error(err)
|
||||
}
|
||||
|
||||
err = c.checkVolumeSnapshotReadyToUse(context.Background(), volumeSnapshots)
|
||||
err = c.checkVolumeSnapshotReadyToUse(context.Background(), volumeSnapshots, backup.Spec.CSISnapshotTimeout.Duration)
|
||||
if err != nil {
|
||||
backupLog.Errorf("fail to wait VolumeSnapshot change to Ready: %s", err.Error())
|
||||
}
|
||||
@@ -879,9 +887,10 @@ func encodeToJSONGzip(data interface{}, desc string) (*bytes.Buffer, []error) {
|
||||
// using goroutine here instead of waiting in CSI plugin, because it's not easy to make BackupItemAction
|
||||
// parallel by now. After BackupItemAction parallel is implemented, this logic should be moved to CSI plugin
|
||||
// as https://github.com/vmware-tanzu/velero-plugin-for-csi/pull/100
|
||||
func (c *backupController) checkVolumeSnapshotReadyToUse(ctx context.Context, volumesnapshots []*snapshotv1api.VolumeSnapshot) error {
|
||||
func (c *backupController) checkVolumeSnapshotReadyToUse(ctx context.Context, volumesnapshots []*snapshotv1api.VolumeSnapshot,
|
||||
csiSnapshotTimeout time.Duration) error {
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
timeout := 10 * time.Minute
|
||||
timeout := csiSnapshotTimeout
|
||||
interval := 5 * time.Second
|
||||
|
||||
for _, vs := range volumesnapshots {
|
||||
@@ -931,6 +940,10 @@ func (c *backupController) deleteVolumeSnapshot(volumeSnapshots []*snapshotv1api
|
||||
if vs.Status.BoundVolumeSnapshotContentName != nil &&
|
||||
len(*vs.Status.BoundVolumeSnapshotContentName) > 0 {
|
||||
vsc = vscMap[*vs.Status.BoundVolumeSnapshotContentName]
|
||||
if nil == vsc {
|
||||
logger.Errorf("Not find %s from the vscMap", vs.Status.BoundVolumeSnapshotContentName)
|
||||
return
|
||||
}
|
||||
if vsc.Spec.DeletionPolicy == snapshotv1api.VolumeSnapshotContentDelete {
|
||||
modifyVSCFlag = true
|
||||
}
|
||||
|
||||
@@ -24,12 +24,10 @@ import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
|
||||
"github.com/vmware-tanzu/velero/internal/storage"
|
||||
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
|
||||
@@ -39,7 +37,10 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
backupStorageLocationSyncPeriod = 1 * time.Minute
|
||||
// keep the enqueue period a smaller value to make sure the BSL can be validated as expected.
|
||||
// The BSL validation frequency is 1 minute by default, if we set the enqueue period as 1 minute,
|
||||
// this will cause the actual validation interval for each BSL to be 2 minutes
|
||||
bslValidationEnqueuePeriod = 10 * time.Second
|
||||
)
|
||||
|
||||
// BackupStorageLocationReconciler reconciles a BackupStorageLocation object
|
||||
@@ -185,7 +186,7 @@ func (r *BackupStorageLocationReconciler) SetupWithManager(mgr ctrl.Manager) err
|
||||
r.Log,
|
||||
mgr.GetClient(),
|
||||
&velerov1api.BackupStorageLocationList{},
|
||||
backupStorageLocationSyncPeriod,
|
||||
bslValidationEnqueuePeriod,
|
||||
// Add filter function to enqueue BSL per ValidationFrequency setting.
|
||||
func(object client.Object) bool {
|
||||
location := object.(*velerov1api.BackupStorageLocation)
|
||||
@@ -193,22 +194,8 @@ func (r *BackupStorageLocationReconciler) SetupWithManager(mgr ctrl.Manager) err
|
||||
},
|
||||
)
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&velerov1api.BackupStorageLocation{}).
|
||||
// Handle BSL's creation event and spec update event to let changed BSL got validation immediately.
|
||||
WithEventFilter(predicate.Funcs{
|
||||
CreateFunc: func(ce event.CreateEvent) bool {
|
||||
return true
|
||||
},
|
||||
UpdateFunc: func(ue event.UpdateEvent) bool {
|
||||
return ue.ObjectNew.GetGeneration() != ue.ObjectOld.GetGeneration()
|
||||
},
|
||||
DeleteFunc: func(de event.DeleteEvent) bool {
|
||||
return false
|
||||
},
|
||||
GenericFunc: func(ge event.GenericEvent) bool {
|
||||
return false
|
||||
},
|
||||
}).
|
||||
// As the "status.LastValidationTime" field is always updated, this triggers new reconciling process, skip the update event that include no spec change to avoid the reconcile loop
|
||||
For(&velerov1api.BackupStorageLocation{}, builder.WithPredicates(kube.SpecChangePredicate{})).
|
||||
Watches(g, nil).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
@@ -124,7 +124,11 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ
|
||||
if err != nil {
|
||||
return r.updateStatusToFailed(ctx, &pvb, err, "building Restic command", log)
|
||||
}
|
||||
defer os.Remove(resticDetails.credsFile)
|
||||
|
||||
defer func() {
|
||||
os.Remove(resticDetails.credsFile)
|
||||
os.Remove(resticDetails.caCertFile)
|
||||
}()
|
||||
|
||||
backupLocation := &velerov1api.BackupStorageLocation{}
|
||||
if err := r.Client.Get(context.Background(), client.ObjectKey{
|
||||
@@ -204,19 +208,6 @@ func (r *PodVolumeBackupReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r *PodVolumeBackupReconciler) singlePathMatch(path string) (string, error) {
|
||||
matches, err := r.FileSystem.Glob(path)
|
||||
if err != nil {
|
||||
return "", errors.WithStack(err)
|
||||
}
|
||||
|
||||
if len(matches) != 1 {
|
||||
return "", errors.Errorf("expected one matching path: %s, got %d", path, len(matches))
|
||||
}
|
||||
|
||||
return matches[0], nil
|
||||
}
|
||||
|
||||
// getParentSnapshot finds the most recent completed PodVolumeBackup for the
|
||||
// specified PVC and returns its Restic snapshot ID. Any errors encountered are
|
||||
// logged but not returned since they do not prevent a backup from proceeding.
|
||||
@@ -237,7 +228,7 @@ func (r *PodVolumeBackupReconciler) getParentSnapshot(ctx context.Context, log l
|
||||
|
||||
// Go through all the podvolumebackups for the PVC and look for the most
|
||||
// recent completed one to use as the parent.
|
||||
var mostRecentPVB *velerov1api.PodVolumeBackup
|
||||
var mostRecentPVB velerov1api.PodVolumeBackup
|
||||
for _, pvb := range pvbList.Items {
|
||||
if pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseCompleted {
|
||||
continue
|
||||
@@ -254,12 +245,12 @@ func (r *PodVolumeBackupReconciler) getParentSnapshot(ctx context.Context, log l
|
||||
continue
|
||||
}
|
||||
|
||||
if mostRecentPVB == nil || pvb.Status.StartTimestamp.After(mostRecentPVB.Status.StartTimestamp.Time) {
|
||||
mostRecentPVB = &pvb
|
||||
if mostRecentPVB.Status == (velerov1api.PodVolumeBackupStatus{}) || pvb.Status.StartTimestamp.After(mostRecentPVB.Status.StartTimestamp.Time) {
|
||||
mostRecentPVB = pvb
|
||||
}
|
||||
}
|
||||
|
||||
if mostRecentPVB == nil {
|
||||
if mostRecentPVB.Status == (velerov1api.PodVolumeBackupStatus{}) {
|
||||
log.Info("No completed PodVolumeBackup found for PVC")
|
||||
return ""
|
||||
}
|
||||
@@ -313,7 +304,7 @@ func (r *PodVolumeBackupReconciler) buildResticCommand(ctx context.Context, log
|
||||
pathGlob := fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(pvb.Spec.Pod.UID), volDir)
|
||||
log.WithField("pathGlob", pathGlob).Debug("Looking for path matching glob")
|
||||
|
||||
path, err := r.singlePathMatch(pathGlob)
|
||||
path, err := kube.SinglePathMatch(pathGlob, r.FileSystem, log)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "identifying unique volume path on host")
|
||||
}
|
||||
@@ -344,8 +335,6 @@ func (r *PodVolumeBackupReconciler) buildResticCommand(ctx context.Context, log
|
||||
if err != nil {
|
||||
log.WithError(err).Error("creating temporary caCert file")
|
||||
}
|
||||
defer os.Remove(details.caCertFile)
|
||||
|
||||
}
|
||||
cmd.CACertFile = details.caCertFile
|
||||
|
||||
|
||||
@@ -215,19 +215,6 @@ func getResticInitContainerIndex(pod *corev1api.Pod) int {
|
||||
return -1
|
||||
}
|
||||
|
||||
func singlePathMatch(path string) (string, error) {
|
||||
matches, err := filepath.Glob(path)
|
||||
if err != nil {
|
||||
return "", errors.WithStack(err)
|
||||
}
|
||||
|
||||
if len(matches) != 1 {
|
||||
return "", errors.Errorf("expected one matching path: %s, got %d", path, len(matches))
|
||||
}
|
||||
|
||||
return matches[0], nil
|
||||
}
|
||||
|
||||
func (c *PodVolumeRestoreReconciler) processRestore(ctx context.Context, req *velerov1api.PodVolumeRestore, pod *corev1api.Pod, log logrus.FieldLogger) error {
|
||||
volumeDir, err := kube.GetVolumeDirectory(ctx, log, pod, req.Spec.Volume, c.Client)
|
||||
if err != nil {
|
||||
@@ -236,7 +223,9 @@ func (c *PodVolumeRestoreReconciler) processRestore(ctx context.Context, req *ve
|
||||
|
||||
// Get the full path of the new volume's directory as mounted in the daemonset pod, which
|
||||
// will look like: /host_pods/<new-pod-uid>/volumes/<volume-plugin-name>/<volume-dir>
|
||||
volumePath, err := singlePathMatch(fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(req.Spec.Pod.UID), volumeDir))
|
||||
volumePath, err := kube.SinglePathMatch(
|
||||
fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(req.Spec.Pod.UID), volumeDir),
|
||||
c.fileSystem, log)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error identifying path of volume")
|
||||
}
|
||||
|
||||
@@ -131,19 +131,25 @@ func (b *objectBackupStoreGetter) Get(location *velerov1api.BackupStorageLocatio
|
||||
return nil, errors.Errorf("backup storage location's bucket name %q must not contain a '/' (if using a prefix, put it in the 'Prefix' field instead)", location.Spec.ObjectStorage.Bucket)
|
||||
}
|
||||
|
||||
// Pass a new map into the object store rather than modifying the passed-in
|
||||
// location. This prevents Velero controllers from accidentally modifying
|
||||
// the in-cluster BSL with data which doesn't belong in Spec.Config
|
||||
objectStoreConfig := make(map[string]string)
|
||||
if location.Spec.Config != nil {
|
||||
for key, val := range location.Spec.Config {
|
||||
objectStoreConfig[key] = val
|
||||
}
|
||||
}
|
||||
|
||||
// add the bucket name and prefix to the config map so that object stores
|
||||
// can use them when initializing. The AWS object store uses the bucket
|
||||
// name to determine the bucket's region when setting up its client.
|
||||
if location.Spec.Config == nil {
|
||||
location.Spec.Config = make(map[string]string)
|
||||
}
|
||||
|
||||
location.Spec.Config["bucket"] = bucket
|
||||
location.Spec.Config["prefix"] = prefix
|
||||
objectStoreConfig["bucket"] = bucket
|
||||
objectStoreConfig["prefix"] = prefix
|
||||
|
||||
// Only include a CACert if it's specified in order to maintain compatibility with plugins that don't expect it.
|
||||
if location.Spec.ObjectStorage.CACert != nil {
|
||||
location.Spec.Config["caCert"] = string(location.Spec.ObjectStorage.CACert)
|
||||
objectStoreConfig["caCert"] = string(location.Spec.ObjectStorage.CACert)
|
||||
}
|
||||
|
||||
// If the BSL specifies a credential, fetch its path on disk and pass to
|
||||
@@ -154,7 +160,7 @@ func (b *objectBackupStoreGetter) Get(location *velerov1api.BackupStorageLocatio
|
||||
return nil, errors.Wrap(err, "unable to get credentials")
|
||||
}
|
||||
|
||||
location.Spec.Config["credentialsFile"] = credsFile
|
||||
objectStoreConfig["credentialsFile"] = credsFile
|
||||
}
|
||||
|
||||
objectStore, err := objectStoreGetter.GetObjectStore(location.Spec.Provider)
|
||||
@@ -162,7 +168,7 @@ func (b *objectBackupStoreGetter) Get(location *velerov1api.BackupStorageLocatio
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := objectStore.Init(location.Spec.Config); err != nil {
|
||||
if err := objectStore.Init(objectStoreConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -520,6 +526,10 @@ func (s *objectBackupStore) GetDownloadURL(target velerov1api.DownloadTarget) (s
|
||||
return s.objectStore.CreateSignedURL(s.bucket, s.layout.getRestoreLogKey(target.Name), DownloadURLTTL)
|
||||
case velerov1api.DownloadTargetKindRestoreResults:
|
||||
return s.objectStore.CreateSignedURL(s.bucket, s.layout.getRestoreResultsKey(target.Name), DownloadURLTTL)
|
||||
case velerov1api.DownloadTargetKindCSIBackupVolumeSnapshots:
|
||||
return s.objectStore.CreateSignedURL(s.bucket, s.layout.getCSIVolumeSnapshotKey(target.Name), DownloadURLTTL)
|
||||
case velerov1api.DownloadTargetKindCSIBackupVolumeSnapshotContents:
|
||||
return s.objectStore.CreateSignedURL(s.bucket, s.layout.getCSIVolumeSnapshotContentsKey(target.Name), DownloadURLTTL)
|
||||
default:
|
||||
return "", errors.Errorf("unsupported download target kind %q", target.Kind)
|
||||
}
|
||||
|
||||
@@ -25,7 +25,6 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
veleroflag "github.com/vmware-tanzu/velero/pkg/cmd/util/flag"
|
||||
"github.com/vmware-tanzu/velero/pkg/util/logging"
|
||||
)
|
||||
|
||||
@@ -78,6 +77,7 @@ type Server interface {
|
||||
|
||||
// RegisterItemSnapshotters registers multiple Item Snapshotters
|
||||
RegisterItemSnapshotters(map[string]HandlerInitializer) Server
|
||||
|
||||
// Server runs the plugin server.
|
||||
Serve()
|
||||
}
|
||||
@@ -87,7 +87,6 @@ type server struct {
|
||||
log *logrus.Logger
|
||||
logLevelFlag *logging.LevelFlag
|
||||
flagSet *pflag.FlagSet
|
||||
featureSet *veleroflag.StringArray
|
||||
backupItemAction *BackupItemActionPlugin
|
||||
volumeSnapshotter *VolumeSnapshotterPlugin
|
||||
objectStore *ObjectStorePlugin
|
||||
@@ -99,12 +98,10 @@ type server struct {
|
||||
// NewServer returns a new Server
|
||||
func NewServer() Server {
|
||||
log := newLogger()
|
||||
features := veleroflag.NewStringArray()
|
||||
|
||||
return &server{
|
||||
log: log,
|
||||
logLevelFlag: logging.LogLevelFlag(log.Level),
|
||||
featureSet: &features,
|
||||
backupItemAction: NewBackupItemActionPlugin(serverLogger(log)),
|
||||
volumeSnapshotter: NewVolumeSnapshotterPlugin(serverLogger(log)),
|
||||
objectStore: NewObjectStorePlugin(serverLogger(log)),
|
||||
@@ -116,7 +113,6 @@ func NewServer() Server {
|
||||
|
||||
func (s *server) BindFlags(flags *pflag.FlagSet) Server {
|
||||
flags.Var(s.logLevelFlag, "log-level", fmt.Sprintf("The level at which to log. Valid values are %s.", strings.Join(s.logLevelFlag.AllowedValues(), ", ")))
|
||||
flags.Var(s.featureSet, "features", "List of feature flags for this plugin")
|
||||
s.flagSet = flags
|
||||
s.flagSet.ParseErrorsWhitelist.UnknownFlags = true
|
||||
|
||||
|
||||
@@ -176,7 +176,7 @@ func (r *repositoryEnsurer) EnsureRepo(ctx context.Context, namespace, volumeNam
	select {
	// repositories should become either ready or not ready quickly if they're
	// newly created.
	case <-time.After(time.Minute):
	case <-time.After(time.Minute * 5):
		return nil, errors.New("timed out waiting for restic repository to become ready")
	case <-ctx.Done():
		return nil, errors.New("timed out waiting for restic repository to become ready")
@@ -99,7 +99,7 @@ func (a *ChangeStorageClassAction) Execute(input *velero.RestoreItemActionExecut
|
||||
|
||||
if len(sts.Spec.VolumeClaimTemplates) > 0 {
|
||||
for index, pvc := range sts.Spec.VolumeClaimTemplates {
|
||||
exists, newStorageClass, err := a.isStorageClassExist(log, *pvc.Spec.StorageClassName, config)
|
||||
exists, newStorageClass, err := a.isStorageClassExist(log, pvc.Spec.StorageClassName, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if !exists {
|
||||
@@ -124,7 +124,7 @@ func (a *ChangeStorageClassAction) Execute(input *velero.RestoreItemActionExecut
|
||||
return nil, errors.Wrap(err, "error getting item's spec.storageClassName")
|
||||
}
|
||||
|
||||
exists, newStorageClass, err := a.isStorageClassExist(log, storageClass, config)
|
||||
exists, newStorageClass, err := a.isStorageClassExist(log, &storageClass, config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if !exists {
|
||||
@@ -140,15 +140,15 @@ func (a *ChangeStorageClassAction) Execute(input *velero.RestoreItemActionExecut
|
||||
return velero.NewRestoreItemActionExecuteOutput(obj), nil
|
||||
}
|
||||
|
||||
func (a *ChangeStorageClassAction) isStorageClassExist(log *logrus.Entry, storageClass string, cm *corev1.ConfigMap) (exists bool, newStorageClass string, err error) {
|
||||
if storageClass == "" {
|
||||
func (a *ChangeStorageClassAction) isStorageClassExist(log *logrus.Entry, storageClass *string, cm *corev1.ConfigMap) (exists bool, newStorageClass string, err error) {
|
||||
if storageClass == nil || *storageClass == "" {
|
||||
log.Debug("Item has no storage class specified")
|
||||
return false, "", nil
|
||||
}
|
||||
|
||||
newStorageClass, ok := cm.Data[storageClass]
|
||||
newStorageClass, ok := cm.Data[*storageClass]
|
||||
if !ok {
|
||||
log.Debugf("No mapping found for storage class %s", storageClass)
|
||||
log.Debugf("No mapping found for storage class %s", *storageClass)
|
||||
return false, "", nil
|
||||
}
|
||||
|
||||
|
||||
@@ -207,11 +207,8 @@ func (kr *kubernetesRestorer) RestoreWithResolvers(
|
||||
)
|
||||
|
||||
// Get resource status includes-excludes. Defaults to excluding all resources
|
||||
restoreStatusIncludesExcludes := collections.GetResourceIncludesExcludes(
|
||||
kr.discoveryHelper,
|
||||
[]string{},
|
||||
[]string{"*"},
|
||||
)
|
||||
var restoreStatusIncludesExcludes *collections.IncludesExcludes
|
||||
|
||||
if req.Restore.Spec.RestoreStatus != nil {
|
||||
restoreStatusIncludesExcludes = collections.GetResourceIncludesExcludes(
|
||||
kr.discoveryHelper,
|
||||
@@ -1252,12 +1249,31 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
|
||||
errs.Add(namespace, err)
|
||||
return warnings, errs
|
||||
}
|
||||
if isAlreadyExistsError {
|
||||
    fromCluster, err := resourceClient.Get(name, metav1.GetOptions{})
    if err != nil {
        ctx.log.Infof("Error retrieving cluster version of %s: %v", kube.NamespaceAndName(obj), err)
        warnings.Add(namespace, err)
        return warnings, errs

    // check if we want to treat the error as a warning, in some cases the creation call might not get executed due to object API validations
    // and Velero might not get the already exists error type but in reality the object already exists
    objectExists := false
    var fromCluster *unstructured.Unstructured

    if restoreErr != nil && !isAlreadyExistsError {
        // check for the existence of the object in cluster, if no error then it implies that object exists
        // and if err then we want to fallthrough and do another get call later
        fromCluster, err = resourceClient.Get(name, metav1.GetOptions{})
        if err == nil {
            objectExists = true
        }
    }

    if isAlreadyExistsError || objectExists {
        // do a get call if we did not run this previously i.e.
        // we've only run this for errors other than isAlreadyExistError
        if fromCluster == nil {
            fromCluster, err = resourceClient.Get(name, metav1.GetOptions{})
            if err != nil {
                ctx.log.Errorf("Error retrieving cluster version of %s: %v", kube.NamespaceAndName(obj), err)
                errs.Add(namespace, err)
                return warnings, errs
            }
        }
        // Remove insubstantial metadata.
        fromCluster, err = resetMetadataAndStatus(fromCluster)
@@ -1362,13 +1378,14 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
        return warnings, errs
    }

    shouldRestoreStatus := ctx.resourceStatusIncludesExcludes.ShouldInclude(groupResource.String())
    shouldRestoreStatus := ctx.resourceStatusIncludesExcludes != nil && ctx.resourceStatusIncludesExcludes.ShouldInclude(groupResource.String())
    if shouldRestoreStatus && statusFieldErr != nil {
        err := fmt.Errorf("could not get status to be restored %s: %v", kube.NamespaceAndName(obj), statusFieldErr)
        ctx.log.Errorf(err.Error())
        errs.Add(namespace, err)
        return warnings, errs
    }
    ctx.log.Debugf("status field for %s: exists: %v, should restore: %v", groupResource, statusFieldExists, shouldRestoreStatus)
    // if it should restore status, run a UpdateStatus
    if statusFieldExists && shouldRestoreStatus {
        if err := unstructured.SetNestedField(obj.Object, objStatus, "status"); err != nil {
@@ -1379,6 +1396,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
    obj.SetResourceVersion(createdObj.GetResourceVersion())
    updated, err := resourceClient.UpdateStatus(obj, metav1.UpdateOptions{})
    if err != nil {
        ctx.log.Infof("status field update failed %s: %v", kube.NamespaceAndName(obj), err)
        warnings.Add(namespace, err)
    } else {
        createdObj = updated
@@ -2025,7 +2043,7 @@ func (ctx *restoreContext) processUpdateResourcePolicy(fromCluster, fromClusterW
    // try patching the in-cluster resource (resource diff plus latest backup/restore labels)
    _, err = resourceClient.Patch(obj.GetName(), patchBytes)
    if err != nil {
        ctx.log.Errorf("patch attempt failed for %s %s: %v", fromCluster.GroupVersionKind(), kube.NamespaceAndName(fromCluster), err)
        ctx.log.Warnf("patch attempt failed for %s %s: %v", fromCluster.GroupVersionKind(), kube.NamespaceAndName(fromCluster), err)
        warnings.Add(namespace, err)
        // try just patching the labels
        warningsFromUpdate, errsFromUpdate := ctx.updateBackupRestoreLabels(fromCluster, fromClusterWithLabels, namespace, resourceClient)
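The hunks above downgrade a failed Create to a warning whenever the object turns out to already exist, either because the API server returned AlreadyExists or because a follow-up Get finds it despite a different creation error. As a reading aid only, here is a minimal self-contained sketch of that decision; the helper name and shape are illustrative and not Velero's actual code, and the AlreadyExists check uses the standard apimachinery helper:

```go
package restoresketch

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// objectAlreadyPresent is a hypothetical helper capturing the decision above:
// a failed create is only a hard error when the object is also absent from the
// cluster; otherwise it is treated as "already exists" and downgraded to a warning.
func objectAlreadyPresent(createErr error, get func() error) bool {
	if createErr == nil {
		return false
	}
	// The API server reported AlreadyExists directly.
	if apierrors.IsAlreadyExists(createErr) {
		return true
	}
	// Creation may have been rejected by API-side validation even though the
	// object exists; verify with a follow-up Get before failing the restore item.
	return get() == nil
}
```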
pkg/util/kube/predicate.go (new file, 47 lines)
@@ -0,0 +1,47 @@
/*
Copyright the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kube

import (
    "reflect"

    "sigs.k8s.io/controller-runtime/pkg/event"
    "sigs.k8s.io/controller-runtime/pkg/predicate"
)

// SpecChangePredicate implements a default update predicate function on Spec change
// As Velero doesn't enable subresource in CRDs, we cannot use the object's metadata.generation field to check the spec change
// More details about the generation field refer to https://github.com/kubernetes-sigs/controller-runtime/blob/v0.12.2/pkg/predicate/predicate.go#L156
type SpecChangePredicate struct {
    predicate.Funcs
}

func (SpecChangePredicate) Update(e event.UpdateEvent) bool {
    if e.ObjectOld == nil {
        return false
    }
    if e.ObjectNew == nil {
        return false
    }
    oldSpec := reflect.ValueOf(e.ObjectOld).Elem().FieldByName("Spec")
    // contains no field named "Spec", return false directly
    if oldSpec.IsZero() {
        return false
    }
    newSpec := reflect.ValueOf(e.ObjectNew).Elem().FieldByName("Spec")
    return !reflect.DeepEqual(oldSpec.Interface(), newSpec.Interface())
}
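A predicate like this is consumed when a controller's watches are set up. The sketch below is not part of this diff; the reconciler name and wiring are assumptions, but it shows one way SpecChangePredicate can be attached with controller-runtime so reconciles only fire when the watched object's Spec actually changes:

```go
package controllersketch

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"

	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	"github.com/vmware-tanzu/velero/pkg/util/kube"
)

// bslReconciler is a placeholder reconciler used only to show the wiring.
type bslReconciler struct{}

func (r *bslReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// Reconcile logic elided; only the watch configuration matters here.
	return ctrl.Result{}, nil
}

// SetupWithManager registers the watch with SpecChangePredicate so updates that
// do not modify the object's Spec are filtered out before Reconcile runs.
func (r *bslReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&velerov1.BackupStorageLocation{}, builder.WithPredicates(kube.SpecChangePredicate{})).
		Complete(r)
}
```

Because the predicate compares the Spec field via reflection, it works for any Velero CRD that has a Spec field without per-type code, at the cost of a DeepEqual on every update event.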
pkg/util/kube/predicate_test.go (new file, 180 lines)
@@ -0,0 +1,180 @@
/*
Copyright the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kube

import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    corev1api "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/event"

    velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

func TestSpecChangePredicate(t *testing.T) {
    cases := []struct {
        name    string
        oldObj  client.Object
        newObj  client.Object
        changed bool
    }{
        {
            name: "Contains no spec field",
            oldObj: &velerov1.BackupStorageLocation{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "bsl01",
                },
            },
            newObj: &velerov1.BackupStorageLocation{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "bsl01",
                },
            },
            changed: false,
        },
        {
            name: "ObjectMetas are different, Specs are same",
            oldObj: &velerov1.BackupStorageLocation{
                ObjectMeta: metav1.ObjectMeta{
                    Name:        "bsl01",
                    Annotations: map[string]string{"key1": "value1"},
                },
                Spec: velerov1.BackupStorageLocationSpec{
                    Provider: "azure",
                },
            },
            newObj: &velerov1.BackupStorageLocation{
                ObjectMeta: metav1.ObjectMeta{
                    Name:        "bsl01",
                    Annotations: map[string]string{"key2": "value2"},
                },
                Spec: velerov1.BackupStorageLocationSpec{
                    Provider: "azure",
                },
            },
            changed: false,
        },
        {
            name: "Statuses are different, Specs are same",
            oldObj: &velerov1.BackupStorageLocation{
                Spec: velerov1.BackupStorageLocationSpec{
                    Provider: "azure",
                },
                Status: velerov1.BackupStorageLocationStatus{
                    Phase: velerov1.BackupStorageLocationPhaseAvailable,
                },
            },
            newObj: &velerov1.BackupStorageLocation{
                Spec: velerov1.BackupStorageLocationSpec{
                    Provider: "azure",
                },
                Status: velerov1.BackupStorageLocationStatus{
                    Phase: velerov1.BackupStorageLocationPhaseUnavailable,
                },
            },
            changed: false,
        },
        {
            name: "Specs are different",
            oldObj: &velerov1.BackupStorageLocation{
                Spec: velerov1.BackupStorageLocationSpec{
                    Provider: "azure",
                },
            },
            newObj: &velerov1.BackupStorageLocation{
                Spec: velerov1.BackupStorageLocationSpec{
                    Provider: "aws",
                },
            },
            changed: true,
        },
        {
            name: "Specs are same",
            oldObj: &velerov1.BackupStorageLocation{
                Spec: velerov1.BackupStorageLocationSpec{
                    Provider: "azure",
                    Config:   map[string]string{"key": "value"},
                    Credential: &corev1api.SecretKeySelector{
                        LocalObjectReference: corev1api.LocalObjectReference{
                            Name: "secret",
                        },
                        Key: "credential",
                    },
                    StorageType: velerov1.StorageType{
                        ObjectStorage: &velerov1.ObjectStorageLocation{
                            Bucket: "bucket1",
                            Prefix: "prefix",
                            CACert: []byte{'a'},
                        },
                    },
                    Default:    true,
                    AccessMode: velerov1.BackupStorageLocationAccessModeReadWrite,
                    BackupSyncPeriod: &metav1.Duration{
                        Duration: 1 * time.Minute,
                    },
                    ValidationFrequency: &metav1.Duration{
                        Duration: 1 * time.Minute,
                    },
                },
            },
            newObj: &velerov1.BackupStorageLocation{
                Spec: velerov1.BackupStorageLocationSpec{
                    Provider: "azure",
                    Config:   map[string]string{"key": "value"},
                    Credential: &corev1api.SecretKeySelector{
                        LocalObjectReference: corev1api.LocalObjectReference{
                            Name: "secret",
                        },
                        Key: "credential",
                    },
                    StorageType: velerov1.StorageType{
                        ObjectStorage: &velerov1.ObjectStorageLocation{
                            Bucket: "bucket1",
                            Prefix: "prefix",
                            CACert: []byte{'a'},
                        },
                    },
                    Default:    true,
                    AccessMode: velerov1.BackupStorageLocationAccessModeReadWrite,
                    BackupSyncPeriod: &metav1.Duration{
                        Duration: 1 * time.Minute,
                    },
                    ValidationFrequency: &metav1.Duration{
                        Duration: 1 * time.Minute,
                    },
                },
            },
            changed: false,
        },
    }

    predicate := SpecChangePredicate{}

    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            changed := predicate.Update(event.UpdateEvent{
                ObjectOld: c.oldObj,
                ObjectNew: c.newObj,
            })
            assert.Equal(t, c.changed, changed)
        })
    }
}
@@ -34,15 +34,20 @@ import (
    "k8s.io/apimachinery/pkg/util/wait"
    corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/vmware-tanzu/velero/pkg/util/filesystem"
)

// These annotations are taken from the Kubernetes persistent volume/persistent volume claim controller.
// They cannot be directly imported because they are part of the kubernetes/kubernetes package, and importing that package is unsupported.
// Their values are well-known and slow changing. They're duplicated here as constants to provide compile-time checking.
// Originals can be found in kubernetes/kubernetes/pkg/controller/volume/persistentvolume/util/util.go.
const KubeAnnBindCompleted = "pv.kubernetes.io/bind-completed"
const KubeAnnBoundByController = "pv.kubernetes.io/bound-by-controller"
const KubeAnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by"
const (
    KubeAnnBindCompleted          = "pv.kubernetes.io/bind-completed"
    KubeAnnBoundByController      = "pv.kubernetes.io/bound-by-controller"
    KubeAnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by"
    KubeAnnMigratedTo             = "pv.kubernetes.io/migrated-to"
)

// NamespaceAndName returns a string in the format <namespace>/<name>
func NamespaceAndName(objMeta metav1.Object) string {
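One place annotations like these matter is when binding metadata is rewritten for restored volumes so the cluster's PV controller can re-bind a claim. The snippet below is an illustrative sketch only; the helper name and the exact set of annotations cleared are assumptions, not taken from this diff, and it is assumed to sit alongside the constants in package kube:

```go
// clearBindingAnnotations is a hypothetical helper: it removes the PV controller's
// binding annotations from an object's metadata so that binding is re-established
// by the cluster rather than carried over from the backup.
func clearBindingAnnotations(annotations map[string]string) {
	delete(annotations, KubeAnnBindCompleted)
	delete(annotations, KubeAnnBoundByController)
	delete(annotations, KubeAnnDynamicallyProvisioned)
}
```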
@@ -163,6 +168,9 @@ func GetVolumeDirectory(ctx context.Context, log logrus.FieldLogger, pod *corev1
    return pvc.Spec.VolumeName, nil
}

// isProvisionedByCSI function checks whether this is a CSI PV by annotation.
// Either "pv.kubernetes.io/provisioned-by" or "pv.kubernetes.io/migrated-to" indicates
// PV is provisioned by CSI.
func isProvisionedByCSI(log logrus.FieldLogger, pv *corev1api.PersistentVolume, kbClient client.Client) (bool, error) {
    if pv.Spec.CSI != nil {
        return true, nil
@@ -171,14 +179,15 @@ func isProvisionedByCSI(log logrus.FieldLogger, pv *corev1api.PersistentVolume,
    // Refer to https://github.com/vmware-tanzu/velero/issues/4496 for more details
    if pv.Annotations != nil {
        driverName := pv.Annotations[KubeAnnDynamicallyProvisioned]
        if len(driverName) > 0 {
        migratedDriver := pv.Annotations[KubeAnnMigratedTo]
        if len(driverName) > 0 || len(migratedDriver) > 0 {
            list := &storagev1api.CSIDriverList{}
            if err := kbClient.List(context.TODO(), list); err != nil {
                return false, err
            }
            for _, driver := range list.Items {
                if driverName == driver.Name {
                    log.Debugf("the annotation %s=%s indicates the volume is provisioned by a CSI driver", KubeAnnDynamicallyProvisioned, driverName)
                if driverName == driver.Name || migratedDriver == driver.Name {
                    log.Debugf("the annotation %s or %s equals to %s indicates the volume is provisioned by a CSI driver", KubeAnnDynamicallyProvisioned, KubeAnnMigratedTo, driverName)
                    return true, nil
                }
            }
@@ -187,6 +196,21 @@ func isProvisionedByCSI(log logrus.FieldLogger, pv *corev1api.PersistentVolume,
    return false, nil
}

// SinglePathMatch function will be called by PVB and PVR controller to check whether pass-in volume path is valid.
// Check whether there is only one match by the path's pattern (/host_pods/%s/volumes/*/volume_name/[mount|]).
func SinglePathMatch(path string, fs filesystem.Interface, log logrus.FieldLogger) (string, error) {
    matches, err := fs.Glob(path)
    if err != nil {
        return "", errors.WithStack(err)
    }
    if len(matches) != 1 {
        return "", errors.Errorf("expected one matching path: %s, got %d", path, len(matches))
    }

    log.Debugf("This is a valid volume path: %s.", matches[0])
    return matches[0], nil
}

// IsV1CRDReady checks a v1 CRD to see if it's ready, with both the Established and NamesAccepted conditions.
func IsV1CRDReady(crd *apiextv1.CustomResourceDefinition) bool {
    var isEstablished, namesAccepted bool
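As a usage illustration, the pattern SinglePathMatch expects is built from the pod UID and the volume directory under the daemonset's host_pods mount. The sketch below is hedged: the helper and package names are illustrative and not part of this diff, but the SinglePathMatch signature matches the one shown above:

```go
package podvolumesketch

import (
	"fmt"

	"github.com/sirupsen/logrus"

	"github.com/vmware-tanzu/velero/pkg/util/filesystem"
	"github.com/vmware-tanzu/velero/pkg/util/kube"
)

// resolveVolumePath builds the host_pods glob for one pod volume and relies on
// SinglePathMatch to fail unless exactly one path matches.
func resolveVolumePath(podUID, volumeDir string, fs filesystem.Interface, log logrus.FieldLogger) (string, error) {
	pattern := fmt.Sprintf("/host_pods/%s/volumes/*/%s", podUID, volumeDir)
	return kube.SinglePathMatch(pattern, fs, log)
}
```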
@@ -197,6 +197,13 @@ func TestGetVolumeDirectorySuccess(t *testing.T) {
        pv:   builder.ForPersistentVolume("a-pv").ObjectMeta(builder.WithAnnotations(KubeAnnDynamicallyProvisioned, "csi.test.com")).Result(),
        want: "a-pv/mount",
    },
    {
        name: "Volume with CSI annotation 'pv.kubernetes.io/migrated-to' appends '/mount' to the volume name",
        pod:  builder.ForPod("ns-1", "my-pod").Volumes(builder.ForVolume("my-vol").PersistentVolumeClaimSource("my-pvc").Result()).Result(),
        pvc:  builder.ForPersistentVolumeClaim("ns-1", "my-pvc").VolumeName("a-pv").Result(),
        pv:   builder.ForPersistentVolume("a-pv").ObjectMeta(builder.WithAnnotations(KubeAnnMigratedTo, "csi.test.com")).Result(),
        want: "a-pv/mount",
    },
}

csiDriver := storagev1api.CSIDriver{
@@ -425,3 +432,13 @@ func TestIsCRDReady(t *testing.T) {
    _, err = IsCRDReady(obj)
    assert.NotNil(t, err)
}

func TestSinglePathMatch(t *testing.T) {
    fakeFS := velerotest.NewFakeFileSystem()
    fakeFS.MkdirAll("testDir1/subpath", 0755)
    fakeFS.MkdirAll("testDir2/subpath", 0755)

    _, err := SinglePathMatch("./*/subpath", fakeFS, logrus.StandardLogger())
    assert.NotNil(t, err)
    assert.Contains(t, err.Error(), "expected one matching path")
}
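The added test only covers the multiple-match error path. A complementary sketch of the single-match success path is shown below; it is not part of this diff, assumes it lives in the same test file (so the velerotest, logrus, and assert imports are already present), and avoids asserting the exact glob prefix since that depends on the fake filesystem's glob implementation:

```go
// TestSinglePathMatchSuccess (hypothetical) exercises the happy path: exactly one
// directory matches the pattern, so the matched path is returned without error.
func TestSinglePathMatchSuccess(t *testing.T) {
	fakeFS := velerotest.NewFakeFileSystem()
	fakeFS.MkdirAll("testDir1/subpath", 0755)

	path, err := SinglePathMatch("./*/subpath", fakeFS, logrus.StandardLogger())
	assert.Nil(t, err)
	assert.Contains(t, path, "testDir1/subpath")
}
```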
@@ -29,6 +29,10 @@ metadata:
  namespace: velero
# Parameters about the backup. Required.
spec:
  # CSISnapshotTimeout specifies the time to wait for the CSI VolumeSnapshot status
  # to turn to ReadyToUse during creation, before returning a timeout error.
  # The default value is 10 minutes.
  csiSnapshotTimeout: 10m
  # Array of namespaces to include in the backup. If unspecified, all namespaces are included.
  # Optional.
  includedNamespaces:
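The same setting can be expressed through the Go API types. The sketch below is hedged: the CSISnapshotTimeout field name and type are assumed from the v1.9 Backup API, and the backup name and namespaces are illustrative:

```go
package backupsketch

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// exampleBackup builds a Backup whose csiSnapshotTimeout is raised from the
// default to 20 minutes, mirroring the YAML key shown above.
func exampleBackup() *velerov1.Backup {
	return &velerov1.Backup{
		ObjectMeta: metav1.ObjectMeta{Name: "nginx-backup", Namespace: "velero"},
		Spec: velerov1.BackupSpec{
			IncludedNamespaces: []string{"nginx-example"},
			// Wait up to 20 minutes for CSI VolumeSnapshots to become ReadyToUse.
			CSISnapshotTimeout: metav1.Duration{Duration: 20 * time.Minute},
		},
	}
}
```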
@@ -34,6 +34,10 @@ spec:
  schedule: 0 7 * * *
  # Template is the spec that should be used for each backup triggered by this schedule.
  template:
    # CSISnapshotTimeout specifies the time to wait for the CSI VolumeSnapshot status
    # to turn to ReadyToUse during creation, before returning a timeout error.
    # The default value is 10 minutes.
    csiSnapshotTimeout: 10m
    # Array of namespaces to include in the scheduled backup. If unspecified, all namespaces are included.
    # Optional.
    includedNamespaces:
@@ -47,10 +47,10 @@ cd velero
kubectl apply -f examples/nginx-app/with-pv.yaml
```

1. Create a backup with PV snapshotting:
1. Create a backup with PV snapshotting. `--csi-snapshot-timeout` sets how long to wait before CSI snapshot creation times out; the default is 10 minutes:

    ```bash
    velero backup create nginx-backup --include-namespaces nginx-example
    velero backup create nginx-backup --include-namespaces nginx-example --csi-snapshot-timeout=20m
    ```

1. Simulate a disaster:
@@ -29,6 +29,11 @@ metadata:
  namespace: velero
# Parameters about the backup. Required.
spec:
  # Available since v1.9.1.
  # CSISnapshotTimeout specifies the time to wait for the CSI VolumeSnapshot status
  # to turn to ReadyToUse during creation, before returning a timeout error.
  # The default value is 10 minutes.
  csiSnapshotTimeout: 10m
  # Array of namespaces to include in the backup. If unspecified, all namespaces are included.
  # Optional.
  includedNamespaces:
@@ -34,6 +34,11 @@ spec:
  schedule: 0 7 * * *
  # Template is the spec that should be used for each backup triggered by this schedule.
  template:
    # Available since v1.9.1.
    # CSISnapshotTimeout specifies the time to wait for the CSI VolumeSnapshot status
    # to turn to ReadyToUse during creation, before returning a timeout error.
    # The default value is 10 minutes.
    csiSnapshotTimeout: 10m
    # Array of namespaces to include in the scheduled backup. If unspecified, all namespaces are included.
    # Optional.
    includedNamespaces:
@@ -221,7 +221,7 @@ func BslDeletionTest(useVolumeSnapshots bool) {
    snapshotCheckPoint, err = GetSnapshotCheckPoint(client, VeleroCfg, 1, bslDeletionTestNs, backupName_1, []string{podName_1})
    Expect(err).NotTo(HaveOccurred(), "Fail to get Azure CSI snapshot checkpoint")
    Expect(SnapshotsShouldBeCreatedInCloud(VeleroCfg.CloudProvider,
        VeleroCfg.CloudCredentialsFile, VeleroCfg.AdditionalBSLBucket,
        VeleroCfg.CloudCredentialsFile, VeleroCfg.BSLBucket,
        VeleroCfg.BSLConfig, backupName_1, snapshotCheckPoint)).To(Succeed())
})
By(fmt.Sprintf("Snapshot of bsl %s should be created in cloud object store", backupLocation_2), func() {
@@ -232,6 +232,8 @@ func BslDeletionTest(useVolumeSnapshots bool) {
    BSLCredentials = VeleroCfg.AdditionalBSLCredentials
    BSLConfig = VeleroCfg.AdditionalBSLConfig
} else {
    // Snapshotting by non-vSphere provider using credentials
    // and config in default BSL
    BSLCredentials = VeleroCfg.CloudCredentialsFile
    BSLConfig = VeleroCfg.BSLConfig
}
@@ -111,7 +111,7 @@ var _ = Describe("[Schedule][OrederedResources] Backup resources should follow t
func TestE2e(t *testing.T) {
    // Skip running E2E tests when running only "short" tests because:
    // 1. E2E tests are long running tests involving installation of Velero and performing backup and restore operations.
    // 2. E2E tests require a kubernetes cluster to install and run velero which further requires ore configuration. See above referenced command line flags.
    // 2. E2E tests require a kubernetes cluster to install and run velero which further requires more configuration. See above referenced command line flags.
    if testing.Short() {
        t.Skip("Skipping E2E tests")
    }
@@ -36,9 +36,9 @@ type ObjectsInStorage interface {

func ObjectsShouldBeInBucket(cloudProvider, cloudCredentialsFile, bslBucket, bslPrefix, bslConfig, backupName, subPrefix string) error {
    fmt.Printf("|| VERIFICATION || - %s %s should exist in storage [%s]\n", subPrefix, backupName, bslPrefix)
    exist, _ := IsObjectsInBucket(cloudProvider, cloudCredentialsFile, bslBucket, bslPrefix, bslConfig, backupName, subPrefix)
    exist, err := IsObjectsInBucket(cloudProvider, cloudCredentialsFile, bslBucket, bslPrefix, bslConfig, backupName, subPrefix)
    if !exist {
        return errors.New(fmt.Sprintf("|| UNEXPECTED ||Backup object %s is not exist in object store after backup as expected\n", backupName))
        return errors.Wrap(err, fmt.Sprintf("|| UNEXPECTED ||Backup object %s is not exist in object store after backup as expected\n", backupName))
    }
    fmt.Printf("|| EXPECTED || - Backup %s exist in object storage bucket %s\n", backupName, bslBucket)
    return nil
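The switch from errors.New to errors.Wrap means the reason IsObjectsInBucket failed is no longer discarded. A small hedged sketch of the difference, using github.com/pkg/errors (which the E2E code above already uses); the function and message are illustrative:

```go
package errsketch

import (
	"fmt"

	"github.com/pkg/errors"
)

// demo shows the practical difference: New drops the underlying cause, while Wrap
// appends it to the context message and records a stack trace.
// Note: errors.Wrap returns nil when cause is nil.
func demo(cause error) {
	withoutCause := errors.New(fmt.Sprintf("backup object %s does not exist in the object store", "nginx-backup"))
	withCause := errors.Wrap(cause, fmt.Sprintf("backup object %s does not exist in the object store", "nginx-backup"))

	fmt.Println(withoutCause) // backup object nginx-backup does not exist in the object store
	fmt.Println(withCause)    // backup object nginx-backup does not exist in the object store: <cause>
}
```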
@@ -87,6 +87,13 @@ var pluginsMatrix = map[string]map[string][]string{
        "gcp":       {"velero/velero-plugin-for-gcp:v1.4.0"},
        "azure-csi": {"velero/velero-plugin-for-microsoft-azure:v1.4.0", "velero/velero-plugin-for-csi:v0.2.0"},
    },
    "v1.9": {
        "aws":       {"velero/velero-plugin-for-aws:v1.5.0"},
        "azure":     {"velero/velero-plugin-for-microsoft-azure:v1.5.0"},
        "vsphere":   {"velero/velero-plugin-for-aws:v1.5.0", "vsphereveleroplugin/velero-plugin-for-vsphere:v1.4.0"},
        "gcp":       {"velero/velero-plugin-for-gcp:v1.5.0"},
        "azure-csi": {"velero/velero-plugin-for-microsoft-azure:v1.5.0", "velero/velero-plugin-for-csi:v0.3.0"},
    },
    "main": {
        "aws":   {"velero/velero-plugin-for-aws:main"},
        "azure": {"velero/velero-plugin-for-microsoft-azure:main"},