Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-28 23:52:08 +00:00)

Compare commits (105 commits)
Commits (SHA1):

632c90bd08, 17fcedefae, 992940c55c, 121b7153a7, bd8f433188, 94ac3b3529, 41ec1c540c,
8a0ac2117b, cbcd15d603, 3100e856a0, b66efd9416, 038fa39451, 7e3fc0884e, 995050390c,
0b6e78145c, 5b8562e73c, dc484d1566, 1f7e9b65e8, c129d1cec3, c700455272, 99f67db39d,
526b604237, 8e740faafc, 0c092eaa17, f0b35cc45a, 179b95c81d, 98d370f84d, 5dc50e4974,
c2dc41efd8, 062a5d7557, ad47513170, 932b8259ae, 0f2d1ab82b, 2ce15de2f8, fc6da9b3db,
0d42815d31, 194d21c6a3, c57a9b94b9, c2fa812ffb, 9996eec6be, b184c0b348, 01e9c86a01,
eb6f1a7b5c, 4c481f4d23, af189fd5f4, 34a6f492e5, 38aa43885b, 015869cc29, 7fb507689f,
8ba5a29679, 24ce316788, 3975187d57, cb49c62aaf, 35b46e392c, 21e2019540, 71bb702297,
e21c66c494, 737b6d932a, 3f840d4ce5, 390e47b08b, 962ea4708e, 72b0bdde70, 4ee6b81647,
5e4fc8f84a, efa4e57d1e, 9471f9da3c, 8b25114047, b2d80471ac, 203a9c6e05, 55c038afa0,
7f959f0184, c49d11f17a, e7703d88ec, f28d008017, d87e8ee16e, 4f59b19cdc, 36a40a0cd3,
073795715e, 7f78d5cbbc, 9401ca3c1a, 93b2f30e32, 9c3d7f9098, 0fab2e5e89, 39f0a4e561,
c1bc52eb65, 64632e29f8, 78dc641b15, eaf84eafb6, b8cd614122, aa253bf016, 293674c40e,
322cbc19e6, 15fe87aea3, 4957dfce61, 9249a13661, 843345f728, 43449885a1, 18eafd5606,
e5c8d3316f, c3feb0489f, 1af01e28ef, 67811606c0, 2cdd8448c2, 1ac0303283, 8a53cef7e6
.travis.yml (new file, 6 lines)

    @@ -0,0 +1,6 @@
    language: go

    go:
      - 1.9.x

    script: make ci
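For orientation: the `make ci` entry point maps onto the new `ci` target added to the Makefile later in this change set. A sketch of what a Travis build would execute, assuming a Go 1.9 toolchain and the repo's `hack/` scripts on a Unix host:

```bash
# Roughly what Travis runs for each build, per the new Makefile `ci` target:
make ci
# which expands to:
#   hack/verify-all.sh        # run the repo's verification checks
#   hack/test.sh $(SRC_DIRS)  # run unit tests
#   GOOS=... GOARCH=... VERSION=... PKG=... BIN=... ./hack/build.sh  # compile the binary
```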
CHANGELOG.md (34 lines changed)

    @@ -1,5 +1,39 @@
    # Changelog

    #### [v0.6.0](https://github.com/heptio/ark/tree/v0.6.0) - 2017-11-30

    Highlights:
    * **Plugins** - We now support user-defined plugins that can extend Ark functionality to meet your custom backup/restore needs without needing to be compiled into the core binary. We support pluggable block and object stores, as well as per-item backup and restore actions that can execute arbitrary logic, including modifying the items being backed up or restored. For more information see the [documentation](https://github.com/heptio/ark/blob/master/docs/plugins.md), which includes a reference to a fully-functional sample plugin repository. (#174 #188 #206 #213 #215 #217 #223 #226)
    * **Describers** - The Ark CLI now includes `describe` commands for `backups`, `restores`, and `schedules` that provide human-friendly representations of the relevant API objects.

    Breaking Changes:
    * The config object format has changed. In order to upgrade to v0.6.0, the config object will have to be updated to match the new format. See the [examples](https://github.com/heptio/ark/tree/master/examples) and [documentation](https://github.com/heptio/ark/blob/master/docs/config-definition.md) for more information.
    * The restore object format has changed. The `warnings` and `errors` fields are now ints containing the counts, while full warnings and errors are now stored in the object store instead of etcd. Restore objects created prior to v0.6.0 should be deleted, or a new bucket used, and the old restore objects deleted from Kubernetes (`kubectl -n heptio-ark delete restore --all`).

    All New Features:
    * Add `ark plugin add` and `ark plugin remove` commands #217, @skriss
    * Add plugin support for block/object stores, backup/restore item actions #174 #188 #206 #213 #215 #223 #226, @skriss @ncdc
    * Improve Azure deployment instructions #216, @ncdc
    * Change default TTL for backups to 30 days #204, @nrb
    * Improve logging for backups and restores #199, @ncdc
    * Add `ark backup describe`, `ark schedule describe` #196, @ncdc
    * Add `ark restore describe` and move restore warnings/errors to object storage #173 #201 #202, @ncdc
    * Upgrade to client-go v5.0.1, kubernetes v1.8.2 #157, @ncdc
    * Add Travis CI support #165 #166, @ncdc

    Bug Fixes:
    * Fix log location hook prefix stripping #222, @ncdc
    * When running `ark backup download`, remove the file if there's an error #154, @ncdc
    * Update documentation for AWS KMS Key alias support #163, @lli-hiya
    * Remove clock from `volume_snapshot_action` #137, @athampy

    #### [v0.5.1](https://github.com/heptio/ark/tree/v0.5.1) - 2017-11-06

    Bug fixes:
    * If a Service is headless, retain ClusterIP = None when backing up and restoring.
    * Use the specified --label-selector when listing backups, schedules, and restores.
    * Restore namespace mapping functionality that was accidentally broken in 0.5.0.
    * Always include namespaces in the backup, regardless of the --include-cluster-resources setting.

    #### [v0.5.0](https://github.com/heptio/ark/tree/v0.5.0) - 2017-10-26

    Breaking changes:
    * The backup tar file format has changed. Backups created using previous versions of Ark cannot be restored using v0.5.0.
Gopkg.lock (generated, 159 lines changed)

    @@ -31,15 +31,9 @@
      packages = ["."]
      revision = "de5bf2ad457846296e2031421a34e2568e304e35"

    [[projects]]
      name = "github.com/asaskevich/govalidator"
      packages = ["."]
      revision = "4918b99a7cb949bb295f3c7bbaf24b577d806e35"
      version = "v6"

    [[projects]]
      name = "github.com/aws/aws-sdk-go"
      packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/ec2query","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/ec2","service/s3","service/sts"]
      packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/ec2query","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/ec2","service/s3","service/s3/s3iface","service/s3/s3manager","service/sts"]
      revision = "1850f427c33c2558a2118dc55c1cf95a633d7432"
      version = "v1.10.27"

    @@ -61,12 +55,6 @@
      revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
      version = "v3.0.0"

    [[projects]]
      name = "github.com/docker/distribution"
      packages = ["digest","reference"]
      revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89"
      version = "v2.6.2"

    [[projects]]
      branch = "master"
      name = "github.com/docker/spdystream"

    @@ -85,12 +73,6 @@
      revision = "dcef7f55730566d41eae5db10e7d6981829720f6"
      version = "1.0.1"

    [[projects]]
      branch = "master"
      name = "github.com/fatih/camelcase"
      packages = ["."]
      revision = "f6a740d52f961c60348ebb109adde9f4635d7540"

    [[projects]]
      name = "github.com/ghodss/yaml"
      packages = ["."]

    @@ -103,18 +85,6 @@
      revision = "20b96f641a5ea98f2f8619ff4f3e061cff4833bd"
      version = "v1.28.2"

    [[projects]]
      branch = "master"
      name = "github.com/go-openapi/analysis"
      packages = ["."]
      revision = "8ed83f2ea9f00f945516462951a288eaa68bf0d6"

    [[projects]]
      branch = "master"
      name = "github.com/go-openapi/errors"
      packages = ["."]
      revision = "03cfca65330da08a5a440053faf994a3c682b5bf"

    [[projects]]
      branch = "master"
      name = "github.com/go-openapi/jsonpointer"

    @@ -127,24 +97,12 @@
      packages = ["."]
      revision = "36d33bfe519efae5632669801b180bf1a245da3b"

    [[projects]]
      branch = "master"
      name = "github.com/go-openapi/loads"
      packages = ["."]
      revision = "a80dea3052f00e5f032e860dd7355cd0cc67e24d"

    [[projects]]
      branch = "master"
      name = "github.com/go-openapi/spec"
      packages = ["."]
      revision = "3faa0055dbbf2110abc1f3b4e3adbb22721e96e7"

    [[projects]]
      branch = "master"
      name = "github.com/go-openapi/strfmt"
      packages = ["."]
      revision = "93a31ef21ac23f317792fff78f9539219dd74619"

    [[projects]]
      branch = "master"
      name = "github.com/go-openapi/swag"

    @@ -166,9 +124,15 @@
    [[projects]]
      branch = "master"
      name = "github.com/golang/protobuf"
      packages = ["proto","protoc-gen-go/descriptor","ptypes/any"]
      packages = ["proto","protoc-gen-go/descriptor","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
      revision = "ab9f9a6dab164b7d1246e0e688b0ab7b94d8553e"

    [[projects]]
      branch = "master"
      name = "github.com/google/btree"
      packages = ["."]
      revision = "316fb6d3f031ae8f4d457c6c5186b9e3ded70435"

    [[projects]]
      branch = "master"
      name = "github.com/google/gofuzz"

    @@ -181,12 +145,42 @@
      packages = ["."]
      revision = "84ed26760e7f6f80887a2fbfb50db3cc415d2cea"

    [[projects]]
      name = "github.com/googleapis/gnostic"
      packages = ["OpenAPIv2","compiler","extensions"]
      revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
      version = "v0.1.0"

    [[projects]]
      branch = "master"
      name = "github.com/gregjones/httpcache"
      packages = [".","diskcache"]
      revision = "c1f8028e62adb3d518b823a2f8e6a95c38bdd3aa"

    [[projects]]
      branch = "master"
      name = "github.com/hashicorp/go-hclog"
      packages = ["."]
      revision = "ca137eb4b4389c9bc6f1a6d887f056bf16c00510"

    [[projects]]
      branch = "master"
      name = "github.com/hashicorp/go-plugin"
      packages = ["."]
      revision = "e2fbc6864d18d3c37b6cde4297ec9fca266d28f1"

    [[projects]]
      branch = "master"
      name = "github.com/hashicorp/golang-lru"
      packages = [".","simplelru"]
      revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6"

    [[projects]]
      branch = "master"
      name = "github.com/hashicorp/yamux"
      packages = ["."]
      revision = "f5742cb6b85602e7fa834e9d5d91a7d7fa850824"

    [[projects]]
      branch = "master"
      name = "github.com/howeyc/gopass"

    @@ -211,6 +205,12 @@
      revision = "3433f3ea46d9f8019119e7dd41274e112a2359a9"
      version = "0.2.2"

    [[projects]]
      name = "github.com/json-iterator/go"
      packages = ["."]
      revision = "6240e1e7983a85228f7fd9c3e1b6932d46ec58e2"
      version = "1.0.3"

    [[projects]]
      branch = "master"
      name = "github.com/juju/ratelimit"

    @@ -225,9 +225,21 @@

    [[projects]]
      branch = "master"
      name = "github.com/mitchellh/mapstructure"
      name = "github.com/mitchellh/go-testing-interface"
      packages = ["."]
      revision = "d0303fe809921458f417bcf828397a65db30a7e4"
      revision = "a61a99592b77c9ba629d254a693acffaeb4b7e28"

    [[projects]]
      branch = "master"
      name = "github.com/petar/GoLLRB"
      packages = ["llrb"]
      revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"

    [[projects]]
      name = "github.com/peterbourgon/diskv"
      packages = ["."]
      revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
      version = "v2.0.1"

    [[projects]]
      name = "github.com/pkg/errors"

    @@ -305,12 +317,6 @@
      packages = ["assert","mock","require"]
      revision = "890a5c3458b43e6104ff5da8dfa139d013d77544"

    [[projects]]
      branch = "master"
      name = "github.com/ugorji/go"
      packages = ["codec"]
      revision = "5efa3251c7f7d05e5d9704a69a984ec9f1386a40"

    [[projects]]
      branch = "master"
      name = "golang.org/x/crypto"

    @@ -361,7 +367,7 @@

    [[projects]]
      name = "google.golang.org/grpc"
      packages = [".","codes","connectivity","credentials","grpclb/grpc_lb_v1","grpclog","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"]
      packages = [".","codes","connectivity","credentials","grpclb/grpc_lb_v1","grpclog","health","health/grpc_health_v1","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"]
      revision = "b3ddf786825de56a4178401b7e174ee332173b66"
      version = "v1.5.2"

    @@ -371,44 +377,55 @@
      revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
      version = "v0.9.0"

    [[projects]]
      branch = "v2"
      name = "gopkg.in/mgo.v2"
      packages = ["bson","internal/json"]
      revision = "3f83fa5005286a7fe593b055f0d7771a7dce4655"

    [[projects]]
      branch = "v2"
      name = "gopkg.in/yaml.v2"
      packages = ["."]
      revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"

    [[projects]]
      branch = "release-1.8"
      name = "k8s.io/api"
      packages = ["admissionregistration/v1alpha1","apps/v1beta1","apps/v1beta2","authentication/v1","authentication/v1beta1","authorization/v1","authorization/v1beta1","autoscaling/v1","autoscaling/v2beta1","batch/v1","batch/v1beta1","batch/v2alpha1","certificates/v1beta1","core/v1","extensions/v1beta1","networking/v1","policy/v1beta1","rbac/v1","rbac/v1alpha1","rbac/v1beta1","scheduling/v1alpha1","settings/v1alpha1","storage/v1","storage/v1beta1"]
      revision = "6c6dac0277229b9e9578c5ca3f74a4345d35cdc2"

    [[projects]]
      name = "k8s.io/apimachinery"
      packages = ["pkg/api/equality","pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/apimachinery","pkg/apimachinery/announced","pkg/apimachinery/registered","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1alpha1","pkg/conversion","pkg/conversion/queryparams","pkg/conversion/unstructured","pkg/fields","pkg/labels","pkg/openapi","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/cache","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/framer","pkg/util/httpstream","pkg/util/httpstream/spdy","pkg/util/intstr","pkg/util/json","pkg/util/net","pkg/util/rand","pkg/util/remotecommand","pkg/util/runtime","pkg/util/sets","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/netutil","third_party/forked/golang/reflect"]
      revision = "1fd2e63a9a370677308a42f24fd40c86438afddf"
      packages = ["pkg/api/equality","pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/apimachinery","pkg/apimachinery/registered","pkg/apis/meta/internalversion","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1alpha1","pkg/conversion","pkg/conversion/queryparams","pkg/conversion/unstructured","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/cache","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/framer","pkg/util/httpstream","pkg/util/httpstream/spdy","pkg/util/intstr","pkg/util/json","pkg/util/mergepatch","pkg/util/net","pkg/util/remotecommand","pkg/util/runtime","pkg/util/sets","pkg/util/strategicpatch","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/json","third_party/forked/golang/netutil","third_party/forked/golang/reflect"]
      revision = "019ae5ada31de202164b118aee88ee2d14075c31"

    [[projects]]
      name = "k8s.io/client-go"
      packages = ["discovery","discovery/fake","dynamic","kubernetes","kubernetes/scheme","kubernetes/typed/admissionregistration/v1alpha1","kubernetes/typed/apps/v1beta1","kubernetes/typed/authentication/v1","kubernetes/typed/authentication/v1beta1","kubernetes/typed/authorization/v1","kubernetes/typed/authorization/v1beta1","kubernetes/typed/autoscaling/v1","kubernetes/typed/autoscaling/v2alpha1","kubernetes/typed/batch/v1","kubernetes/typed/batch/v2alpha1","kubernetes/typed/certificates/v1beta1","kubernetes/typed/core/v1","kubernetes/typed/extensions/v1beta1","kubernetes/typed/networking/v1","kubernetes/typed/policy/v1beta1","kubernetes/typed/rbac/v1alpha1","kubernetes/typed/rbac/v1beta1","kubernetes/typed/settings/v1alpha1","kubernetes/typed/storage/v1","kubernetes/typed/storage/v1beta1","pkg/api","pkg/api/v1","pkg/api/v1/ref","pkg/apis/admissionregistration","pkg/apis/admissionregistration/v1alpha1","pkg/apis/apps","pkg/apis/apps/v1beta1","pkg/apis/authentication","pkg/apis/authentication/v1","pkg/apis/authentication/v1beta1","pkg/apis/authorization","pkg/apis/authorization/v1","pkg/apis/authorization/v1beta1","pkg/apis/autoscaling","pkg/apis/autoscaling/v1","pkg/apis/autoscaling/v2alpha1","pkg/apis/batch","pkg/apis/batch/v1","pkg/apis/batch/v2alpha1","pkg/apis/certificates","pkg/apis/certificates/v1beta1","pkg/apis/extensions","pkg/apis/extensions/v1beta1","pkg/apis/networking","pkg/apis/networking/v1","pkg/apis/policy","pkg/apis/policy/v1beta1","pkg/apis/rbac","pkg/apis/rbac/v1alpha1","pkg/apis/rbac/v1beta1","pkg/apis/settings","pkg/apis/settings/v1alpha1","pkg/apis/storage","pkg/apis/storage/v1","pkg/apis/storage/v1beta1","pkg/util","pkg/util/parsers","pkg/version","plugin/pkg/client/auth/azure","plugin/pkg/client/auth/gcp","plugin/pkg/client/auth/oidc","rest","rest/watch","testing","third_party/forked/golang/template","tools/auth","tools/cache","tools/clientcmd","tools/clientcmd/api","tools/clientcmd/api/latest","tools/clientcmd/api/v1","tools/metrics","tools/remotecommand","transport","util/cert","util/exec","util/flowcontrol","util/homedir","util/integer","util/jsonpath","util/workqueue"]
      revision = "d92e8497f71b7b4e0494e5bd204b48d34bd6f254"
      version = "v4.0.0"
      packages = ["discovery","discovery/fake","dynamic","kubernetes","kubernetes/scheme","kubernetes/typed/admissionregistration/v1alpha1","kubernetes/typed/apps/v1beta1","kubernetes/typed/apps/v1beta2","kubernetes/typed/authentication/v1","kubernetes/typed/authentication/v1beta1","kubernetes/typed/authorization/v1","kubernetes/typed/authorization/v1beta1","kubernetes/typed/autoscaling/v1","kubernetes/typed/autoscaling/v2beta1","kubernetes/typed/batch/v1","kubernetes/typed/batch/v1beta1","kubernetes/typed/batch/v2alpha1","kubernetes/typed/certificates/v1beta1","kubernetes/typed/core/v1","kubernetes/typed/extensions/v1beta1","kubernetes/typed/networking/v1","kubernetes/typed/policy/v1beta1","kubernetes/typed/rbac/v1","kubernetes/typed/rbac/v1alpha1","kubernetes/typed/rbac/v1beta1","kubernetes/typed/scheduling/v1alpha1","kubernetes/typed/settings/v1alpha1","kubernetes/typed/storage/v1","kubernetes/typed/storage/v1beta1","pkg/version","plugin/pkg/client/auth/azure","plugin/pkg/client/auth/gcp","plugin/pkg/client/auth/oidc","rest","rest/watch","testing","third_party/forked/golang/template","tools/auth","tools/cache","tools/clientcmd","tools/clientcmd/api","tools/clientcmd/api/latest","tools/clientcmd/api/v1","tools/metrics","tools/pager","tools/reference","tools/remotecommand","transport","transport/spdy","util/cert","util/exec","util/flowcontrol","util/homedir","util/integer","util/jsonpath","util/workqueue"]
      revision = "2ae454230481a7cb5544325e12ad7658ecccd19b"
      version = "v5.0.1"

    [[projects]]
      branch = "release-1.8"
      name = "k8s.io/code-generator"
      packages = ["cmd/client-gen","cmd/client-gen/args","cmd/client-gen/generators","cmd/client-gen/generators/fake","cmd/client-gen/generators/scheme","cmd/client-gen/generators/util","cmd/client-gen/path","cmd/client-gen/types","cmd/deepcopy-gen","cmd/defaulter-gen","cmd/informer-gen","cmd/informer-gen/generators","cmd/lister-gen","cmd/lister-gen/generators"]
      revision = "0c5165a97e055df28cca8bbcb8b7c8ae1019b3f1"

    [[projects]]
      name = "k8s.io/gengo"
      packages = ["args","examples/deepcopy-gen/generators","examples/defaulter-gen/generators","examples/set-gen/sets","generator","namer","parser","types"]
      revision = "9e661e9308f078838e266cca1c673922088c0ea4"

    [[projects]]
      branch = "master"
      name = "k8s.io/gengo"
      packages = ["args","generator","namer","parser","types"]
      revision = "2ef5ef33e269934e14149598f5a85d1f561a7219"
      name = "k8s.io/kube-openapi"
      packages = ["pkg/common"]
      revision = "61b46af70dfed79c6d24530cd23b41440a7f22a5"

    [[projects]]
      name = "k8s.io/kubernetes"
      packages = ["cmd/libs/go2idl/client-gen","cmd/libs/go2idl/client-gen/args","cmd/libs/go2idl/client-gen/generators","cmd/libs/go2idl/client-gen/generators/fake","cmd/libs/go2idl/client-gen/generators/scheme","cmd/libs/go2idl/client-gen/path","cmd/libs/go2idl/client-gen/types","cmd/libs/go2idl/informer-gen","cmd/libs/go2idl/informer-gen/generators","cmd/libs/go2idl/lister-gen","cmd/libs/go2idl/lister-gen/generators","pkg/printers","pkg/util/slice"]
      revision = "793658f2d7ca7f064d2bdf606519f9fe1229c381"
      version = "v1.7.4"
      packages = ["pkg/printers"]
      revision = "bdaeafa71f6c7c04636251031f93464384d54963"
      version = "v1.8.2"

    [solve-meta]
      analyzer-name = "dep"
      analyzer-version = 1
      inputs-digest = "37edb445765bd183e89ff47d8a7822a132c3752a8b528e34f499ad4858f792a8"
      inputs-digest = "6287197115277ba882d5bb5dc20d74a8cb8e13d90c4e783c518a4e4aed55245f"
      solver-name = "gps-cdcl"
      solver-version = 1
Gopkg.toml (34 lines changed)

    @@ -21,11 +21,15 @@
    #   version = "2.4.0"

    required = [
      "k8s.io/kubernetes/cmd/libs/go2idl/client-gen",
      "k8s.io/kubernetes/cmd/libs/go2idl/lister-gen",
      "k8s.io/kubernetes/cmd/libs/go2idl/informer-gen"
    ]
      "k8s.io/code-generator/cmd/client-gen",
      # needed by generated clientsets, but not an explicit dep in client-gen itself
      "k8s.io/apimachinery/pkg/apimachinery/registered",

      "k8s.io/code-generator/cmd/deepcopy-gen",
      "k8s.io/code-generator/cmd/defaulter-gen",
      "k8s.io/code-generator/cmd/lister-gen",
      "k8s.io/code-generator/cmd/informer-gen",
    ]

    [[constraint]]
      name = "cloud.google.com/go"

    @@ -85,16 +89,32 @@ required = [

    [[constraint]]
      name = "k8s.io/apimachinery"
      revision = "1fd2e63a9a370677308a42f24fd40c86438afddf"
      revision = "019ae5ada31de202164b118aee88ee2d14075c31"

    [[constraint]]
      name = "k8s.io/client-go"
      version = "~4.0"
      version = "~5.0"

    [[constraint]]
      name = "k8s.io/kubernetes"
      version = "~1.7"
      version = "~1.8"

    [[override]]
      name = "github.com/russross/blackfriday"
      revision = "93622da34e54fb6529bfb7c57e710f37a8d9cbd8"

    [[constraint]]
      branch = "release-1.8"
      name = "k8s.io/api"

    [[constraint]]
      branch = "release-1.8"
      name = "k8s.io/code-generator"

    [[override]]
      name = "k8s.io/gengo"
      revision = "9e661e9308f078838e266cca1c673922088c0ea4"

    [[constraint]]
      branch = "master"
      name = "github.com/hashicorp/go-plugin"
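These constraint and override stanzas are consumed by the `dep` tool (the lock file above records `analyzer-name = "dep"`). After an edit like this, the vendor tree and `Gopkg.lock` are typically refreshed with something along these lines (a sketch; assumes `dep` is installed locally):

```bash
# Re-resolve constraints and rewrite vendor/ and Gopkg.lock to match Gopkg.toml
dep ensure

# Inspect the resolved versions, e.g. to confirm client-go moved to the ~5.0 series
dep status
```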
Makefile (14 lines changed)

    @@ -87,6 +87,7 @@ _output/bin/$(GOOS)/$(GOARCH)/$(BIN): build-dirs
            VERSION=$(VERSION) \
            PKG=$(PKG) \
            BIN=$(BIN) \
            OUTPUT_DIR=/output/$(GOOS)/$(GOARCH) \
            ./hack/build.sh'"

    TTY := $(shell tty -s && echo "-t")

    @@ -136,15 +137,12 @@ ifneq ($(SKIP_TESTS), 1)
        @$(MAKE) shell CMD="-c 'hack/test.sh $(SRC_DIRS)'"
    endif

    fmt:
        @$(MAKE) shell CMD="-c 'hack/update-fmt.sh'"

    verify:
    ifneq ($(SKIP_TESTS), 1)
        @$(MAKE) shell CMD="-c 'hack/verify-all.sh'"
    endif

    update: fmt
    update:
        @$(MAKE) shell CMD="-c 'hack/update-all.sh'"

    release: all-tar-bin checksum

    @@ -160,6 +158,7 @@ all-tar-bin: $(addprefix tar-bin-, $(CLI_PLATFORMS))
    tar-bin-%:
        @$(MAKE) ARCH=$* tar-bin

    GIT_DESCRIBE = $(shell git describe --tags --always --dirty)
    tar-bin: build
        mkdir -p _output/release

    @@ -168,7 +167,7 @@ tar-bin: build
        tar \
            -C _output/bin/$(GOOS)/$(GOARCH) \
            --files-from=- \
            -zcf _output/release/$(BIN)-$(GOOS)-$(GOARCH).tar.gz
            -zcf _output/release/$(BIN)-$(GIT_DESCRIBE)-$(GOOS)-$(GOARCH).tar.gz

    build-dirs:
        @mkdir -p _output/bin/$(GOOS)/$(GOARCH)

    @@ -181,3 +180,8 @@ container-clean:

    bin-clean:
        rm -rf .go _output

    ci:
        hack/verify-all.sh
        hack/test.sh $(SRC_DIRS)
        GOOS=$(GOOS) GOARCH=$(GOARCH) VERSION=$(VERSION) PKG=$(PKG) BIN=$(BIN) ./hack/build.sh
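The new `GIT_DESCRIBE` variable changes release artifact naming: tarballs now embed the output of `git describe` instead of only OS and architecture. A sketch of the effect, with illustrative version strings:

```bash
git describe --tags --always --dirty
# e.g. v0.6.0 on a tagged tree, or v0.6.0-3-gabc1234-dirty on an untagged, modified one

make tar-bin
# old name: _output/release/ark-linux-amd64.tar.gz
# new name: _output/release/ark-v0.6.0-linux-amd64.tar.gz   (illustrative)
```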
README.md (14 lines changed)

    @@ -155,6 +155,14 @@ Looking at a specific example--an `ark backup create test-backup` command trigge
    5. By default, Ark also makes disk snapshots of any persistent volumes, using the appropriate cloud service API. (This can be disabled via the option `--snapshot-volumes=false`)

    ## Extensibility

    Ark has multiple mechanisms for extending the core functionality to meet your individual backup/restore needs:

    * [Hooks][27] allow you to specify commands to be executed within running pods during a backup. This is useful if you need to run a workload-specific command prior to taking a backup (for example, to flush disk buffers or to freeze a database).
    * [Plugins][28] enable you to develop custom object/block storage back-ends or per-item backup/restore actions that can execute arbitrary logic, including modifying the items being backed up/restored. Plugins can be used by Ark without needing to be compiled into the core Ark binary.

    ## Further documentation

    To learn more about Heptio Ark operations and their applications, see the [`/docs` directory][3].

    @@ -184,8 +192,8 @@ Feedback and discussion is available on [the mailing list][24].
    See [the list of releases][6] to find out about feature changes.

    [0]: https://github.com/heptio
    [1]: https://jenkins.i.heptio.com/buildStatus/icon?job=ark-master
    [2]: https://jenkins.i.heptio.com/job/ark-master/
    [1]: https://travis-ci.org/heptio/ark.svg?branch=master
    [2]: https://travis-ci.org/heptio/ark
    [3]: /docs
    [4]: https://github.com/heptio/ark/issues
    [5]: /CONTRIBUTING.md

    @@ -210,3 +218,5 @@ See [the list of releases][6] to find out about feature changes.
    [24]: http://j.hept.io/ark-list
    [25]: http://slack.kubernetes.io/
    [26]: https://github.com/heptio/ark/releases
    [27]: /docs/hooks.md
    [28]: /docs/plugins.md
    @@ -44,6 +44,7 @@ These include:
    * Listers
    * Shared informers
    * Documentation
    * Protobuf/gRPC types

    If you make any of the following changes, you will need to run `make update` to regenerate
    automatically generated files:

    @@ -51,6 +52,10 @@ automatically generated files:
    * Add/edit/remove commands or subcommands
    * Add new API types

    If you make the following change, you will need to run [generate-proto.sh][13] to regenerate
    automatically generated files (note that this requires the [proto compiler][14] to be installed):
    * Add/edit/remove protobuf message or service definitions

    ### Cross compiling

    By default, `make` will build an `ark` binary that runs on your host operating system and

    @@ -109,3 +114,5 @@ If you need to add or update the vendored dependencies, please see [Vendoring de
    [10]: #4-vendoring-dependencies
    [11]: vendoring-dependencies.md
    [12]: #3-test
    [13]: ../hack/generate-proto.sh
    [14]: https://grpc.io/docs/quickstart/go.html#install-protocol-buffers-v3
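Taken together, the two regeneration workflows added to this doc come down to the following (a sketch; assumes the repo's `hack/` scripts, and for the second command the protocol buffers compiler referenced by link [14]):

```bash
# After adding/editing/removing commands, subcommands, or API types:
make update              # regenerates clients, listers, informers, docs, etc.

# After adding/editing/removing protobuf message or service definitions:
hack/generate-proto.sh   # regenerates the Protobuf/gRPC types listed above
```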
    @@ -30,7 +30,9 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.
    ### SEE ALSO
    * [ark backup](ark_backup.md) - Work with backups
    * [ark create](ark_create.md) - Create ark resources
    * [ark describe](ark_describe.md) - Describe ark resources
    * [ark get](ark_get.md) - Get ark resources
    * [ark plugin](ark_plugin.md) - Work with plugins
    * [ark restore](ark_restore.md) - Work with restores
    * [ark schedule](ark_schedule.md) - Work with schedules
    * [ark server](ark_server.md) - Run the ark server
    @@ -29,6 +29,7 @@ Work with backups
    ### SEE ALSO
    * [ark](ark.md) - Back up and restore Kubernetes cluster resources.
    * [ark backup create](ark_backup_create.md) - Create a backup
    * [ark backup describe](ark_backup_describe.md) - Describe backups
    * [ark backup download](ark_backup_download.md) - Download a backup
    * [ark backup get](ark_backup_get.md) - Get backups
    * [ark backup logs](ark_backup_logs.md) - Get backup logs
    @@ -26,7 +26,7 @@ ark backup create NAME [flags]
      -l, --selector labelSelector               only back up resources matching this label selector (default <none>)
          --show-labels                          show labels in the last column
          --snapshot-volumes optionalBool[=true] take snapshots of PersistentVolumes as part of the backup
          --ttl duration                         how long before the backup can be garbage collected (default 24h0m0s)
          --ttl duration                         how long before the backup can be garbage collected (default 720h0m0s)
    ```

    ### Options inherited from parent commands
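This hunk reflects the `--ttl` default moving from 24 hours to 720 hours (30 days), matching the changelog entry above. A shorter retention can still be set per backup; the backup name here is illustrative:

```bash
# Make this backup eligible for garbage collection after 72 hours instead of 30 days
ark backup create nightly-test --ttl 72h0m0s
```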
docs/cli-reference/ark_backup_describe.md (new file, 36 lines)

    @@ -0,0 +1,36 @@
    ## ark backup describe

    Describe backups

    ### Synopsis

    Describe backups

    ```
    ark backup describe [NAME1] [NAME2] [NAME...] [flags]
    ```

    ### Options

    ```
      -h, --help              help for describe
      -l, --selector string   only show items matching this label selector
    ```

    ### Options inherited from parent commands

    ```
          --alsologtostderr                  log to standard error as well as files
          --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
          --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
          --log_dir string                   If non-empty, write log files in this directory
          --logtostderr                      log to standard error instead of files
          --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
      -v, --v Level                          log level for V logs
          --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
    ```

    ### SEE ALSO
    * [ark backup](ark_backup.md) - Work with backups
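Typical invocations of the new describer look like this (the backup name and the label selector are illustrative):

```bash
# Human-friendly summaries of all backups
ark backup describe

# A specific backup, filtered by a label selector
ark backup describe test-backup -l app=nginx
```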
    @@ -26,7 +26,7 @@ ark create backup NAME [flags]
      -l, --selector labelSelector               only back up resources matching this label selector (default <none>)
          --show-labels                          show labels in the last column
          --snapshot-volumes optionalBool[=true] take snapshots of PersistentVolumes as part of the backup
          --ttl duration                         how long before the backup can be garbage collected (default 24h0m0s)
          --ttl duration                         how long before the backup can be garbage collected (default 720h0m0s)
    ```

    ### Options inherited from parent commands
    @@ -27,7 +27,7 @@ ark create schedule NAME [flags]
      -l, --selector labelSelector               only back up resources matching this label selector (default <none>)
          --show-labels                          show labels in the last column
          --snapshot-volumes optionalBool[=true] take snapshots of PersistentVolumes as part of the backup
          --ttl duration                         how long before the backup can be garbage collected (default 24h0m0s)
          --ttl duration                         how long before the backup can be garbage collected (default 720h0m0s)
    ```

    ### Options inherited from parent commands
docs/cli-reference/ark_describe.md (new file, 34 lines)

    @@ -0,0 +1,34 @@
    ## ark describe

    Describe ark resources

    ### Synopsis

    Describe ark resources

    ### Options

    ```
      -h, --help   help for describe
    ```

    ### Options inherited from parent commands

    ```
          --alsologtostderr                  log to standard error as well as files
          --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
          --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
          --log_dir string                   If non-empty, write log files in this directory
          --logtostderr                      log to standard error instead of files
          --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
      -v, --v Level                          log level for V logs
          --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
    ```

    ### SEE ALSO
    * [ark](ark.md) - Back up and restore Kubernetes cluster resources.
    * [ark describe backups](ark_describe_backups.md) - Describe backups
    * [ark describe restores](ark_describe_restores.md) - Describe restores
    * [ark describe schedules](ark_describe_schedules.md) - Describe schedules
docs/cli-reference/ark_describe_backups.md (new file, 36 lines)

    @@ -0,0 +1,36 @@
    ## ark describe backups

    Describe backups

    ### Synopsis

    Describe backups

    ```
    ark describe backups [NAME1] [NAME2] [NAME...] [flags]
    ```

    ### Options

    ```
      -h, --help              help for backups
      -l, --selector string   only show items matching this label selector
    ```

    ### Options inherited from parent commands

    ```
          --alsologtostderr                  log to standard error as well as files
          --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
          --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
          --log_dir string                   If non-empty, write log files in this directory
          --logtostderr                      log to standard error instead of files
          --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
      -v, --v Level                          log level for V logs
          --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
    ```

    ### SEE ALSO
    * [ark describe](ark_describe.md) - Describe ark resources
docs/cli-reference/ark_describe_restores.md (new file, 36 lines)

    @@ -0,0 +1,36 @@
    ## ark describe restores

    Describe restores

    ### Synopsis

    Describe restores

    ```
    ark describe restores [NAME1] [NAME2] [NAME...] [flags]
    ```

    ### Options

    ```
      -h, --help              help for restores
      -l, --selector string   only show items matching this label selector
    ```

    ### Options inherited from parent commands

    ```
          --alsologtostderr                  log to standard error as well as files
          --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
          --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
          --log_dir string                   If non-empty, write log files in this directory
          --logtostderr                      log to standard error instead of files
          --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
      -v, --v Level                          log level for V logs
          --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
    ```

    ### SEE ALSO
    * [ark describe](ark_describe.md) - Describe ark resources
docs/cli-reference/ark_describe_schedules.md (new file, 36 lines)

    @@ -0,0 +1,36 @@
    ## ark describe schedules

    Describe schedules

    ### Synopsis

    Describe schedules

    ```
    ark describe schedules [NAME1] [NAME2] [NAME...] [flags]
    ```

    ### Options

    ```
      -h, --help              help for schedules
      -l, --selector string   only show items matching this label selector
    ```

    ### Options inherited from parent commands

    ```
          --alsologtostderr                  log to standard error as well as files
          --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
          --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
          --log_dir string                   If non-empty, write log files in this directory
          --logtostderr                      log to standard error instead of files
          --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
      -v, --v Level                          log level for V logs
          --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
    ```

    ### SEE ALSO
    * [ark describe](ark_describe.md) - Describe ark resources
docs/cli-reference/ark_plugin.md (new file, 33 lines)

    @@ -0,0 +1,33 @@
    ## ark plugin

    Work with plugins

    ### Synopsis

    Work with plugins

    ### Options

    ```
      -h, --help   help for plugin
    ```

    ### Options inherited from parent commands

    ```
          --alsologtostderr                  log to standard error as well as files
          --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
          --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
          --log_dir string                   If non-empty, write log files in this directory
          --logtostderr                      log to standard error instead of files
          --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
      -v, --v Level                          log level for V logs
          --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
    ```

    ### SEE ALSO
    * [ark](ark.md) - Back up and restore Kubernetes cluster resources.
    * [ark plugin add](ark_plugin_add.md) - Add a plugin
    * [ark plugin remove](ark_plugin_remove.md) - Remove a plugin
docs/cli-reference/ark_plugin_add.md (new file, 36 lines)

    @@ -0,0 +1,36 @@
    ## ark plugin add

    Add a plugin

    ### Synopsis

    Add a plugin

    ```
    ark plugin add IMAGE [flags]
    ```

    ### Options

    ```
      -h, --help                help for add
          --image-pull-policy   the imagePullPolicy for the plugin container. Valid values are Always, IfNotPresent, Never. (default IfNotPresent)
    ```

    ### Options inherited from parent commands

    ```
          --alsologtostderr                  log to standard error as well as files
          --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
          --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
          --log_dir string                   If non-empty, write log files in this directory
          --logtostderr                      log to standard error instead of files
          --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
      -v, --v Level                          log level for V logs
          --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
    ```

    ### SEE ALSO
    * [ark plugin](ark_plugin.md) - Work with plugins
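End to end, registering and unregistering a plugin per the synopses above looks like this (the image name is illustrative; any image that ships an Ark plugin binary works):

```bash
# Register a plugin from a container image, overriding the default pull policy
ark plugin add example.com/my-org/ark-plugin:v0.1.0 --image-pull-policy Always

# Remove it again, by name or by image
ark plugin remove example.com/my-org/ark-plugin:v0.1.0
```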
docs/cli-reference/ark_plugin_remove.md (new file, 35 lines)

    @@ -0,0 +1,35 @@
    ## ark plugin remove

    Remove a plugin

    ### Synopsis

    Remove a plugin

    ```
    ark plugin remove [NAME | IMAGE] [flags]
    ```

    ### Options

    ```
      -h, --help   help for remove
    ```

    ### Options inherited from parent commands

    ```
          --alsologtostderr                  log to standard error as well as files
          --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
          --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
          --log_dir string                   If non-empty, write log files in this directory
          --logtostderr                      log to standard error instead of files
          --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
      -v, --v Level                          log level for V logs
          --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
    ```

    ### SEE ALSO
    * [ark plugin](ark_plugin.md) - Work with plugins
    @@ -30,6 +30,7 @@ Work with restores
    * [ark](ark.md) - Back up and restore Kubernetes cluster resources.
    * [ark restore create](ark_restore_create.md) - Create a restore
    * [ark restore delete](ark_restore_delete.md) - Delete a restore
    * [ark restore describe](ark_restore_describe.md) - Describe restores
    * [ark restore get](ark_restore_get.md) - Get restores
    * [ark restore logs](ark_restore_logs.md) - Get restore logs
docs/cli-reference/ark_restore_describe.md (new file, 36 lines)

    @@ -0,0 +1,36 @@
    ## ark restore describe

    Describe restores

    ### Synopsis

    Describe restores

    ```
    ark restore describe [NAME1] [NAME2] [NAME...] [flags]
    ```

    ### Options

    ```
      -h, --help              help for describe
      -l, --selector string   only show items matching this label selector
    ```

    ### Options inherited from parent commands

    ```
          --alsologtostderr                  log to standard error as well as files
          --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
          --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
          --log_dir string                   If non-empty, write log files in this directory
          --logtostderr                      log to standard error instead of files
          --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
      -v, --v Level                          log level for V logs
          --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
    ```

    ### SEE ALSO
    * [ark restore](ark_restore.md) - Work with restores
    @@ -30,5 +30,6 @@ Work with schedules
    * [ark](ark.md) - Back up and restore Kubernetes cluster resources.
    * [ark schedule create](ark_schedule_create.md) - Create a schedule
    * [ark schedule delete](ark_schedule_delete.md) - Delete a schedule
    * [ark schedule describe](ark_schedule_describe.md) - Describe schedules
    * [ark schedule get](ark_schedule_get.md) - Get schedules
    @@ -27,7 +27,7 @@ ark schedule create NAME [flags]
      -l, --selector labelSelector               only back up resources matching this label selector (default <none>)
          --show-labels                          show labels in the last column
          --snapshot-volumes optionalBool[=true] take snapshots of PersistentVolumes as part of the backup
          --ttl duration                         how long before the backup can be garbage collected (default 24h0m0s)
          --ttl duration                         how long before the backup can be garbage collected (default 720h0m0s)
    ```

    ### Options inherited from parent commands
docs/cli-reference/ark_schedule_describe.md (new file, 36 lines)

    @@ -0,0 +1,36 @@
    ## ark schedule describe

    Describe schedules

    ### Synopsis

    Describe schedules

    ```
    ark schedule describe [NAME1] [NAME2] [NAME...] [flags]
    ```

    ### Options

    ```
      -h, --help              help for describe
      -l, --selector string   only show items matching this label selector
    ```

    ### Options inherited from parent commands

    ```
          --alsologtostderr                  log to standard error as well as files
          --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
          --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
          --log_dir string                   If non-empty, write log files in this directory
          --logtostderr                      log to standard error instead of files
          --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
      -v, --v Level                          log level for V logs
          --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
    ```

    ### SEE ALSO
    * [ark schedule](ark_schedule.md) - Work with schedules
    @@ -27,13 +27,13 @@ To integrate Heptio Ark with AWS, you should follow the instructions below to cr

    2. Create an IAM user:

        ```
        ```bash
        aws iam create-user --user-name heptio-ark
        ```

    3. Attach a policy to give `heptio-ark` the necessary permissions:

        ```
        ```bash
        aws iam attach-user-policy \
          --policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess \
          --user-name heptio-ark

    @@ -44,13 +44,13 @@ To integrate Heptio Ark with AWS, you should follow the instructions below to cr

    4. Create an access key for the user:

        ```
        ```bash
        aws iam create-access-key --user-name heptio-ark
        ```

        The result should look like:

        ```
        ```json
        {
          "AccessKey": {
            "UserName": "heptio-ark",

    @@ -73,13 +73,14 @@ To integrate Heptio Ark with AWS, you should follow the instructions below to cr
    #### Credentials and configuration

    In the Ark root directory, run the following to first set up namespaces, RBAC, and other scaffolding:
    ```

    ```bash
    kubectl apply -f examples/common/00-prereqs.yaml
    ```

    Create a Secret, running this command in the local directory of the credentials file you just created:

    ```
    ```bash
    kubectl create secret generic cloud-credentials \
      --namespace heptio-ark \
      --from-file cloud=credentials-ark
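Before moving on, it can be worth confirming the Secret landed in the `heptio-ark` namespace; a quick sanity check (the AGE value is illustrative):

```bash
kubectl --namespace heptio-ark get secret cloud-credentials
# NAME                TYPE      DATA      AGE
# cloud-credentials   Opaque    1         10s
```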
    @@ -112,7 +113,7 @@ To integrate Heptio Ark with GCP, you should follow the instructions below to cr

    2. View your current config settings:

        ```
        ```bash
        gcloud config list
        ```

    @@ -120,19 +121,21 @@ To integrate Heptio Ark with GCP, you should follow the instructions below to cr

    2. Create a service account:

        ```
        ```bash
        gcloud iam service-accounts create heptio-ark \
          --display-name "Heptio Ark service account"
        ```

        Then list all accounts and find the `heptio-ark` account you just created:
        ```
        ```bash
        gcloud iam service-accounts list
        ```

        Set the `$SERVICE_ACCOUNT_EMAIL` variable to match its `email` value.
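One way to capture that value directly into the variable; the `--filter`/`--format` expressions are an assumption about your listing output, keyed to the display name used in the earlier step:

```bash
# Assumes exactly one service account matches the display name from step 2
SERVICE_ACCOUNT_EMAIL=$(gcloud iam service-accounts list \
  --filter="displayName:Heptio Ark service account" \
  --format='value(email)')
```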
3. Attach policies to give `heptio-ark` the necessary permissions to function (replacing placeholders appropriately):
|
||||
|
||||
```
|
||||
```bash
|
||||
gcloud projects add-iam-policy-binding $PROJECT_ID \
|
||||
--member serviceAccount:$SERVICE_ACCOUNT_EMAIL \
|
||||
--role roles/compute.storageAdmin
|
||||
@@ -143,7 +146,7 @@ To integrate Heptio Ark with GCP, you should follow the instructions below to cr
|
||||
|
||||
4. Create a service account key, specifying an output file (`credentials-ark`) in your local directory:
|
||||
|
||||
```
|
||||
```bash
|
||||
gcloud iam service-accounts keys create credentials-ark \
|
||||
--iam-account $SERVICE_ACCOUNT_EMAIL
|
||||
```
|
||||
@@ -151,13 +154,14 @@ To integrate Heptio Ark with GCP, you should follow the instructions below to cr
|
||||
#### Credentials and configuration
|
||||
|
||||
In the Ark root directory, run the following to first set up namespaces, RBAC, and other scaffolding:
|
||||
```
|
||||
|
||||
```bash
|
||||
kubectl apply -f examples/common/00-prereqs.yaml
|
||||
```
|
||||
|
||||
Create a Secret, running this command in the local directory of the credentials file you just created:
|
||||
|
||||
```
|
||||
```bash
|
||||
kubectl create secret generic cloud-credentials \
|
||||
--namespace heptio-ark \
|
||||
--from-file cloud=credentials-ark
|
||||
@@ -181,75 +185,108 @@ Now that you have your Google Cloud credentials stored in a Secret, you need to
|
||||
|
||||
### Azure
|
||||
|
||||
#### Kubernetes cluster prerequisites
|
||||
|
||||
Ensure that the VMs for your agent pool allow Managed Disks. If I/O performance is critical,
|
||||
consider using Premium Managed Disks, as these are SSD backed.
|
||||
|
||||
#### Service principal creation
|
||||
To integrate Heptio Ark with Azure, you should follow the instructions below to create an Ark-specific [service principal][17].
|
||||
|
||||
1. If you do not have the `az` Azure CLI 2.0 locally installed, follow the [user guide][18] to set it up. Once done, run:
|
||||
1. If you do not have the `az` Azure CLI 2.0 locally installed, follow the [install guide][18] to set it up. Once done, run:
|
||||
|
||||
```
|
||||
```bash
|
||||
az login
|
||||
```
|
||||
|
||||
2. There are seven environment variables that need to be set for Heptio Ark to work properly. The following steps detail how to acquire these, in the process of setting up the necessary RBAC.
|
||||
|
||||
3. List your account:
|
||||
3. Obtain your Azure Account Subscription ID and Tenant ID:
|
||||
|
||||
```
|
||||
az account list
|
||||
```
|
||||
Save the relevant response values into environment variables: `id` corresponds to `$AZURE_SUBSCRIPTION_ID` and `tenantId` corresponds to `$AZURE_TENANT_ID`.
|
||||
|
||||
4. Assuming that you already have a running Kubernetes cluster on Azure, you should have a corresponding resource group as well. List your current groups to find it:
|
||||
|
||||
```
|
||||
az group list
|
||||
```
|
||||
Get your cluster's group `name` from the response, and use it to set `$AZURE_RESOURCE_GROUP`. (Also note the `location`--this is later used in the Azure-specific portion of the Ark Config).
|
||||
|
||||
5. Create a service principal with the "Contributor" role:
|
||||
|
||||
```
|
||||
az ad sp create-for-rbac --role="Contributor" --name="heptio-ark"
|
||||
```
|
||||
From the response, save `appId` into `$AZURE_CLIENT_ID` and `password` into `$AZURE_CLIENT_SECRET`.
|
||||
|
||||
6. Login into the `heptio-ark` service principal account:
|
||||
|
||||
```
|
||||
az login --service-principal \
|
||||
--username http://heptio-ark \
|
||||
--password $AZURE_CLIENT_SECRET \
|
||||
--tenant $AZURE_TENANT_ID
|
||||
```bash
|
||||
AZURE_SUBSCRIPTION_ID=`az account list --query '[?isDefault].id' -o tsv`
|
||||
AZURE_TENANT_ID=`az account list --query '[?isDefault].tenantId' -o tsv`
|
||||
```
|
||||
|
||||
7. Specify a *globally-unique* storage account id and save it in `$AZURE_STORAGE_ACCOUNT_ID`. Then create the storage account, specifying the optional `--location` flag if you do not have defaults from `az configure`:
|
||||
4. Set the name of the Resource Group that contains your Kubernetes cluster.
|
||||
|
||||
```bash
|
||||
# Change "Kubernetes" as needed
|
||||
AZURE_RESOURCE_GROUP=Kubernetes
|
||||
```
|
||||
|
||||
If you are unsure of the Resource Group name, run the following command to get a list that you can select from. Then set the `AZURE_RESOURCE_GROUP` environment variable to the appropriate value.
|
||||
|
||||
```bash
|
||||
az group list --query '[].{ ResourceGroup: name, Location:location }'
|
||||
```
|
||||
|
||||
Get your cluster's Resource Group name from the `ResourceGroup` value in the response, and use it to set `$AZURE_RESOURCE_GROUP`. (Also note the `Location` value in the response -- this is later used in the Azure-specific portion of the Ark Config).
|
||||
|
||||
5. Create a service principal with `Contributor` role. This will have subscription-wide access, so protect this credential. You can specify a password or let the `az ad sp create-for-rbac` command create one for you.
|
||||
|
||||
```bash
|
||||
# Create service principal and specify your own password
|
||||
AZURE_CLIENT_SECRET=super_secret_and_high_entropy_password_replace_me_with_your_own
|
||||
az ad sp create-for-rbac --name "heptio-ark" --role "Contributor" --password $AZURE_CLIENT_SECRET
|
||||
|
||||
# Or create service principal and let the cli generate a password for you. ensure we capture the password though.
|
||||
AZURE_CLIENT_SECRET=`az ad sp create-for-rbac --name "heptio-ark" --role "Contributor" --query 'password' -o tsv`
|
||||
|
||||
# After creating the service principal, obtain the client id
|
||||
AZURE_CLIENT_ID=`az ad sp list --display-name "heptio-ark" --query '[0].appId' -o tsv`
|
||||
```

6. Create the storage account and blob container for Ark to store the backups in.

    The storage account can be created in the same Resource Group as your Kubernetes cluster or
    separated into its own Resource Group. The example below shows the storage account created in a
    separate `Ark_Backups` Resource Group.

    The storage account needs a globally unique id, since this is used for DNS; the random suffix
    below saves you from having to invent one. The storage account is created with encryption at
    rest (Microsoft-managed keys) and is configured to allow access only via HTTPS.

    ```bash
    # Create a resource group for the backups storage account. Change the location as needed.
    AZURE_BACKUP_RESOURCE_GROUP=Ark_Backups
    az group create -n $AZURE_BACKUP_RESOURCE_GROUP --location WestUS

    # Create the storage account
    AZURE_STORAGE_ACCOUNT_ID="ark`cat /proc/sys/kernel/random/uuid | cut -d '-' -f5`"
    az storage account create \
        --name $AZURE_STORAGE_ACCOUNT_ID \
        --resource-group $AZURE_BACKUP_RESOURCE_GROUP \
        --sku Standard_GRS \
        --encryption-services blob \
        --https-only true \
        --kind BlobStorage \
        --access-tier Hot

    # Create the blob container named "ark". Feel free to use a different name; you'll need to
    # adjust the `bucket` field under `backupStorageProvider` in the Ark Config accordingly if you do.
    az storage container create -n ark --public-access off --account-name $AZURE_STORAGE_ACCOUNT_ID

    # Obtain the storage access key for the storage account just created
    AZURE_STORAGE_KEY=`az storage account keys list \
        --account-name $AZURE_STORAGE_ACCOUNT_ID \
        --resource-group $AZURE_BACKUP_RESOURCE_GROUP \
        --query [0].value \
        -o tsv`
    ```
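
    At this point every variable Ark needs should be populated. A small bash check, my addition rather than part of the original steps, that flags anything missed:

    ```bash
    for v in AZURE_SUBSCRIPTION_ID AZURE_TENANT_ID AZURE_RESOURCE_GROUP AZURE_CLIENT_ID \
             AZURE_CLIENT_SECRET AZURE_STORAGE_ACCOUNT_ID AZURE_STORAGE_KEY; do
      [ -n "${!v}" ] || echo "$v is not set"
    done
    ```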

#### Credentials and configuration

In the Ark root directory, run the following to first set up namespaces, RBAC, and other scaffolding:

```bash
kubectl apply -f examples/common/00-prereqs.yaml
```

Now you need to create a Secret that contains all seven of the environment variables you just set. The command begins like the following (the diff excerpt ends partway through it):

```bash
kubectl create secret generic cloud-credentials \
    --namespace heptio-ark \
    --from-literal AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID} \
```
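
As a sketch, the complete invocation would pass all seven values the same way; the exact flag order is my assumption, since the excerpt above is truncated:

```bash
kubectl create secret generic cloud-credentials \
    --namespace heptio-ark \
    --from-literal AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID} \
    --from-literal AZURE_TENANT_ID=${AZURE_TENANT_ID} \
    --from-literal AZURE_RESOURCE_GROUP=${AZURE_RESOURCE_GROUP} \
    --from-literal AZURE_CLIENT_ID=${AZURE_CLIENT_ID} \
    --from-literal AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET} \
    --from-literal AZURE_STORAGE_ACCOUNT_ID=${AZURE_STORAGE_ACCOUNT_ID} \
    --from-literal AZURE_STORAGE_KEY=${AZURE_STORAGE_KEY}
```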

@@ -267,6 +304,34 @@ Now that you have your Azure credentials stored in a Secret, you need to replace

* Replace `<YOUR_BUCKET>`, `<YOUR_LOCATION>`, and `<YOUR_TIMEOUT>`. See the [Config definition][8] for details.

Here is an example of a completed file:

```yaml
apiVersion: ark.heptio.com/v1
kind: Config
metadata:
  namespace: heptio-ark
  name: default
persistentVolumeProvider:
  name: azure
  config:
    location: "West US"
    apiTimeout: 15m
backupStorageProvider:
  name: azure
  bucket: ark
backupSyncPeriod: 30m
gcSyncPeriod: 30m
scheduleSyncPeriod: 1m
restoreOnlyMode: false
```

You can get a complete list of Azure locations with the following command:

```bash
az account list-locations --query "sort([].displayName)" -o tsv
```

## Run

@@ -277,33 +342,42 @@ Make sure that you have run `kubectl apply -f examples/common/00-prereqs.yaml` f

* **AWS and GCP**

    Start the Ark server itself, using the Config from the appropriate cloud-provider-specific directory:

    ```bash
    kubectl apply -f examples/common/10-deployment.yaml
    kubectl apply -f examples/<CLOUD-PROVIDER>/
    ```

* **Azure**

    Because Azure loads its credentials differently (from environment variables rather than a file), you need to run the following instead:

    ```bash
    kubectl apply -f examples/azure/
    ```

### Basic example (No PVs)

Start the sample nginx app:

```bash
kubectl apply -f examples/nginx-app/base.yaml
```

Now create a backup:

```bash
ark backup create nginx-backup --selector app=nginx
```

Simulate a disaster:

```bash
kubectl delete namespaces nginx-example
```

Now restore your lost resources:

```bash
ark restore create nginx-backup
```
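
To check progress, the same CLI can report status. A sketch -- note that the restore object's name is generated from the backup name plus a timestamp, so yours will differ:

```bash
ark backup get
ark restore get
ark restore describe nginx-backup-20170726180512   # substitute your restore's name
```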

@@ -312,28 +386,36 @@ ark restore create nginx-backup

> NOTE: For Azure, your Kubernetes cluster needs to be version 1.7.2+ in order to support PV snapshotting of its managed disks.

Start the sample nginx app:

```bash
kubectl apply -f examples/nginx-app/with-pv.yaml
```

Because Kubernetes does not automatically transfer labels from PVCs to dynamically generated PVs, you need to do so manually:

```bash
nginx_pv_name=$(kubectl get pv -o jsonpath='{.items[?(@.spec.claimRef.name=="nginx-logs")].metadata.name}')
kubectl label pv $nginx_pv_name app=nginx
```

Now create a backup with PV snapshotting:

```bash
ark backup create nginx-backup --selector app=nginx
```

Simulate a disaster:

```bash
kubectl delete namespaces nginx-example
kubectl delete pv $nginx_pv_name
```

Because the default [reclaim policy][19] for dynamically-provisioned PVs is "Delete", the above commands should trigger your cloud provider to delete the disk backing the PV. The deletion process is asynchronous, so this may take some time. **Before continuing to the next step, check your cloud provider (via dashboard or CLI) to confirm that the disk no longer exists.**

Now restore your lost resources:

```bash
ark restore create nginx-backup
```
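
A quick way to confirm the PV came back from the snapshot and still carries the label -- a sketch, not part of the original walkthrough:

```bash
kubectl get pv --show-labels
kubectl get pvc --namespace nginx-example
```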

@@ -355,7 +437,7 @@ ark restore create nginx-backup

[15]: https://cloud.google.com/compute/docs/access/service-accounts
[16]: https://cloud.google.com/compute/docs/gcloud-compute
[17]: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects
[18]: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli
[19]: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming
[20]: /CHANGELOG.md
[21]: /docs/build-from-scratch.md

@@ -24,11 +24,13 @@ metadata:
  namespace: heptio-ark
  name: default
persistentVolumeProvider:
  name: aws
  config:
    region: us-west-2
backupStorageProvider:
  name: aws
  bucket: ark
  config:
    region: us-west-2
backupSyncPeriod: 60m
gcSyncPeriod: 60m
@@ -44,9 +46,13 @@ The configurable parameters are as follows:

| Key | Type | Default | Meaning |
| --- | --- | --- | --- |
| `persistentVolumeProvider` | CloudProviderConfig | None (Optional) | The specification for whichever cloud provider the cluster is using for persistent volumes (to be snapshotted), if any.<br><br>If not specified, Backups and Restores requesting PV snapshots & restores, respectively, are considered invalid. <br><br> *NOTE*: For Azure, your Kubernetes cluster needs to be version 1.7.2+ in order to support PV snapshotting of its managed disks. |
| `persistentVolumeProvider/name` | String<br><br>(Ark natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.) | None (Optional) | The name of the cloud provider the cluster is using for persistent volumes, if any. |
| `persistentVolumeProvider/config` | map[string]string<br><br>(See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs or your provider's documentation.) | None (Optional) | Configuration keys/values to be passed to the cloud provider for persistent volumes. |
| `backupStorageProvider` | CloudProviderConfig | Required Field | The specification for whichever cloud provider will be used to actually store the backups. |
| `backupStorageProvider/name` | String<br><br>(Ark natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.) | Required Field | The name of the cloud provider that will be used to actually store the backups. |
| `backupStorageProvider/bucket` | String | Required Field | The storage bucket where backups are to be uploaded. |
| `backupStorageProvider/config` | map[string]string<br><br>(See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs or your provider's documentation.) | None (Optional) | Configuration keys/values to be passed to the cloud provider for backup storage. |
| `backupSyncPeriod` | metav1.Duration | 60m0s | How frequently Ark queries the object storage to make sure that the appropriate Backup resources have been created for existing backup files. |
| `gcSyncPeriod` | metav1.Duration | 60m0s | How frequently Ark queries the object storage to delete backup files that have passed their TTL. |
| `scheduleSyncPeriod` | metav1.Duration | 1m0s | How frequently Ark checks its Schedule resource objects to see if a backup needs to be initiated. |
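
As a concrete tie-in for the table, here is a minimal sketch that creates the example Config from the top of this document via a heredoc (the values are illustrative):

```bash
kubectl apply -f - <<EOF
apiVersion: ark.heptio.com/v1
kind: Config
metadata:
  namespace: heptio-ark
  name: default
persistentVolumeProvider:
  name: aws
  config:
    region: us-west-2
backupStorageProvider:
  name: aws
  bucket: ark
  config:
    region: us-west-2
backupSyncPeriod: 60m
gcSyncPeriod: 60m
scheduleSyncPeriod: 1m
EOF
```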

@@ -57,17 +63,16 @@ The configurable parameters are as follows:

**(Or other S3-compatible storage)**

#### backupStorageProvider/config

| Key | Type | Default | Meaning |
| --- | --- | --- | --- |
| `region` | string | Required Field | *Example*: "us-east-1"<br><br>See [AWS documentation][3] for the full list. |
| `disableSSL` | bool | `false` | Set this to `true` if you are using Minio (or another local, S3-compatible storage service) and your deployment is not secured. |
| `s3ForcePathStyle` | bool | `false` | Set this to `true` if you are using a local storage service like Minio. |
| `s3Url` | string | Required field for non-AWS-hosted storage | *Example*: http://minio:9000<br><br>You can specify the AWS S3 URL here for explicitness, but Ark can already generate it from `region` and `bucket`. This field is primarily for local storage services like Minio. |
| `kmsKeyId` | string | Empty | *Example*: "502b409c-4da1-419f-a16e-eif453b3i49f" or "alias/`<KMS-Key-Alias-Name>`"<br><br>Specify an [AWS KMS key][10] id or alias to enable encryption of the backups stored in S3. Only works with AWS S3 and may require explicitly granting key usage rights. |

#### persistentVolumeProvider/config (AWS Only)

| Key | Type | Default | Meaning |
| --- | --- | --- | --- |
@@ -75,11 +80,11 @@ The configurable parameters are as follows:

### GCP

#### backupStorageProvider/config

No parameters required.

#### persistentVolumeProvider/config

| Key | Type | Default | Meaning |
| --- | --- | --- | --- |
@@ -87,11 +92,11 @@ No parameters required; specify an empty object per [example file][11].

### Azure

#### backupStorageProvider/config

No parameters required.

#### persistentVolumeProvider/config

| Key | Type | Default | Meaning |
| --- | --- | --- | --- |

@@ -15,37 +15,89 @@ backup-test-2-20170726180514 backup-test-2 Completed 0 0 2
backup-test-2-20170726180515   backup-test-2   Completed   0          1         2017-07-26 13:32:59 -0400 EDT   <none>
```

To delve into the warnings and errors in more detail, you can use `ark restore describe`:

```
ark restore describe backup-test-20170726180512
```

The output looks like this:

```
Name:         backup-test-20170726180512
Namespace:    heptio-ark
Labels:       <none>
Annotations:  <none>

Backup:  backup-test

Namespaces:
  Included:  *
  Excluded:  <none>

Resources:
  Included:        serviceaccounts
  Excluded:        nodes
  Cluster-scoped:  auto

Namespace mappings:  <none>

Label selector:  <none>

Restore PVs:  auto

Phase:  Completed

Validation errors:  <none>

Warnings:
  Ark:      <none>
  Cluster:  <none>
  Namespaces:
    heptio-ark:   serviceaccounts "ark" already exists
                  serviceaccounts "default" already exists
    kube-public:  serviceaccounts "default" already exists
    kube-system:  serviceaccounts "attachdetach-controller" already exists
                  serviceaccounts "certificate-controller" already exists
                  serviceaccounts "cronjob-controller" already exists
                  serviceaccounts "daemon-set-controller" already exists
                  serviceaccounts "default" already exists
                  serviceaccounts "deployment-controller" already exists
                  serviceaccounts "disruption-controller" already exists
                  serviceaccounts "endpoint-controller" already exists
                  serviceaccounts "generic-garbage-collector" already exists
                  serviceaccounts "horizontal-pod-autoscaler" already exists
                  serviceaccounts "job-controller" already exists
                  serviceaccounts "kube-dns" already exists
                  serviceaccounts "namespace-controller" already exists
                  serviceaccounts "node-controller" already exists
                  serviceaccounts "persistent-volume-binder" already exists
                  serviceaccounts "pod-garbage-collector" already exists
                  serviceaccounts "replicaset-controller" already exists
                  serviceaccounts "replication-controller" already exists
                  serviceaccounts "resourcequota-controller" already exists
                  serviceaccounts "service-account-controller" already exists
                  serviceaccounts "service-controller" already exists
                  serviceaccounts "statefulset-controller" already exists
                  serviceaccounts "ttl-controller" already exists
    default:      serviceaccounts "default" already exists

Errors:
  Ark:         <none>
  Cluster:     <none>
  Namespaces:  <none>
```
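
Since the describe output includes the phase, a small polling loop works for scripting. A sketch -- `Completed` is the only terminal phase shown in the listings above, so adjust the pattern if your version reports others:

```bash
name=backup-test-20170726180512   # substitute your restore's name
until ark restore describe ${name} | grep -q '^Phase:.*Completed'; do
  sleep 5
done
ark restore describe ${name}
```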

## Structure

Errors appear for incomplete or partial restores. Warnings appear for non-blocking issues (e.g. the
restore looks "normal" and all resources referenced in the backup exist in some form, although some
of them may have been pre-existing).

Both errors and warnings are structured in the same way:

* `Ark`: A list of system-related issues encountered by the Ark server (e.g. couldn't read directory).

* `Cluster`: A list of issues related to the restore of cluster-scoped resources.

* `Namespaces`: A map of namespaces to the list of issues related to the restore of their respective resources.

[0]: #example
[1]: #structure

@@ -22,4 +22,4 @@ Examples of cases where Ark is useful:

Yes, with some exceptions. For example, when Ark restores pods, it deletes the `nodeName` from the
pod so that it can be scheduled onto a new node. You can see some more examples of the differences
in [pod_action.go](https://github.com/heptio/ark/blob/master/pkg/restore/pod_action.go)

35	docs/plugins.md	Normal file
@@ -0,0 +1,35 @@
# Plugins

Heptio Ark has a plugin architecture that allows users to add their own custom functionality to Ark backups & restores
without having to modify/recompile the core Ark binary. To add custom functionality, users simply create their own binary
containing an implementation of one of Ark's plugin kinds (described below), plus a small amount of boilerplate code to
expose the plugin implementation to Ark. This binary is added to a container image that serves as an init container for
the Ark server pod and copies the binary into a shared emptyDir volume for the Ark server to access.

A fully-functional [sample plugin repository][1] is provided to serve as a convenient starting point for plugin authors.

## Plugin Kinds

Ark currently supports the following kinds of plugins:

- **Object Store** - persists and retrieves backups, backup logs and restore logs
- **Block Store** - creates volume snapshots (during backup) and restores volumes from snapshots (during restore)
- **Backup Item Action** - executes arbitrary logic for individual items prior to storing them in a backup file
- **Restore Item Action** - executes arbitrary logic for individual items prior to restoring them into a cluster

## Plugin Naming

Ark relies on a naming convention to identify plugins. Each plugin binary should be named `ark-<plugin-kind>-<name>`,
where `plugin-kind` is one of `objectstore`, `blockstore`, `backupitemaction`, or `restoreitemaction`, and `name` is
unique within the plugin kind.

## Plugin Logging

Ark provides a [logger][2] that can be used by plugins to log structured information to the main Ark server log or
per-backup/restore logs. See the [sample repository][1] for an example of how to instantiate and use the logger
within your plugin.

[1]: https://github.com/heptio/ark-plugin-example
[2]: https://github.com/heptio/ark/blob/master/pkg/plugin/logger.go
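
The text above describes the packaging contract but does not show it. Here is a sketch, in which the image name, plugin name, and target path are illustrative rather than taken from the docs, of an init-container image whose only job is to copy a plugin binary into the shared emptyDir:

```bash
cat > Dockerfile <<'EOF'
FROM alpine:3.6
COPY ark-blockstore-mycloud /ark-blockstore-mycloud
# When run as an init container with the shared emptyDir mounted at /target,
# this drops the plugin binary where the Ark server can find it.
ENTRYPOINT ["cp", "/ark-blockstore-mycloud", "/target/"]
EOF
docker build -t example.com/ark-blockstore-mycloud:0.1 .
```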

@@ -37,7 +37,7 @@ Heptio Ark can help you port your resources from one cluster to another, as long

    ```
    ark backup create <BACKUP-NAME>
    ```

    The default TTL is 30 days (720 hours); you can use the `--ttl` flag to change this as necessary. A two-command sketch of the full flow follows the next step.

2. *(Cluster 2)* Make sure that the `persistentVolumeProvider` and `backupStorageProvider` fields in the Ark Config match the ones from *Cluster 1*, so that your new Ark server instance is pointing to the same bucket.
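
Putting steps 1 and 2 together, the migration reduces to a pair of commands (a sketch; the names are illustrative):

```bash
# On Cluster 1
ark backup create migration-backup --ttl 48h

# On Cluster 2, once the Backup has synced from the shared bucket
ark restore create migration-backup
```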

@@ -19,11 +19,13 @@ metadata:
  namespace: heptio-ark
  name: default
persistentVolumeProvider:
  name: aws
  config:
    region: <YOUR_REGION>
backupStorageProvider:
  name: aws
  bucket: <YOUR_BUCKET>
  config:
    region: <YOUR_REGION>
backupSyncPeriod: 30m
gcSyncPeriod: 30m

@@ -37,3 +37,9 @@ spec:
        envFrom:
        - secretRef:
            name: cloud-credentials
        volumeMounts:
        - name: plugins
          mountPath: /plugins
      volumes:
      - name: plugins
        emptyDir: {}

@@ -19,12 +19,13 @@ metadata:
  namespace: heptio-ark
  name: default
persistentVolumeProvider:
  name: azure
  config:
    location: <YOUR_LOCATION>
    apiTimeout: <YOUR_TIMEOUT>
backupStorageProvider:
  name: azure
  bucket: <YOUR_BUCKET>
backupSyncPeriod: 30m
gcSyncPeriod: 30m
scheduleSyncPeriod: 1m

@@ -37,6 +37,8 @@ spec:
        volumeMounts:
        - name: cloud-credentials
          mountPath: /credentials
        - name: plugins
          mountPath: /plugins
        env:
        - name: AWS_SHARED_CREDENTIALS_FILE
          value: /credentials/cloud
@@ -44,3 +46,5 @@ spec:
      - name: cloud-credentials
        secret:
          secretName: cloud-credentials
      - name: plugins
        emptyDir: {}

@@ -19,11 +19,12 @@ metadata:
  namespace: heptio-ark
  name: default
persistentVolumeProvider:
  name: gcp
  config:
    project: <YOUR_PROJECT>
backupStorageProvider:
  name: gcp
  bucket: <YOUR_BUCKET>
backupSyncPeriod: 30m
gcSyncPeriod: 30m
scheduleSyncPeriod: 1m

@@ -19,11 +19,12 @@ metadata:
  namespace: heptio-ark
  name: default
backupStorageProvider:
  name: aws
  bucket: ark
  config:
    region: minio
    s3ForcePathStyle: "true"
    s3Url: http://minio.heptio-ark.svc:9000
backupSyncPeriod: 1m
gcSyncPeriod: 1m
scheduleSyncPeriod: 1m

@@ -1,5 +1,5 @@
/*
Copyright YEAR the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,5 +13,3 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

@@ -1,4 +1,4 @@
#!/bin/bash

# Copyright 2016 The Kubernetes Authors.
#
@@ -53,7 +53,10 @@ LDFLAGS="-X ${PKG}/pkg/buildinfo.Version=${VERSION}"
LDFLAGS="${LDFLAGS} -X ${PKG}/pkg/buildinfo.GitSHA=${GIT_SHA}"
LDFLAGS="${LDFLAGS} -X ${PKG}/pkg/buildinfo.GitTreeState=${GIT_TREE_STATE}"

if [[ -z "${OUTPUT_DIR:-}" ]]; then
  OUTPUT_DIR=.
fi
OUTPUT=${OUTPUT_DIR}/${BIN}
if [[ "${GOOS}" = "windows" ]]; then
  OUTPUT="${OUTPUT}.exe"
fi
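
The effect of the new OUTPUT logic is that callers choose where the binary lands. A hypothetical invocation -- the script path and the wrapper that exports `GOOS`/`BIN` are my assumptions, not shown in this excerpt:

```bash
GOOS=windows BIN=ark OUTPUT_DIR=_output ./hack/build.sh   # -> _output/ark.exe
GOOS=linux   BIN=ark ./hack/build.sh                      # -> ./ark (OUTPUT_DIR defaults to .)
```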

@@ -16,12 +16,8 @@

HACK_DIR=$(dirname "${BASH_SOURCE}")

echo "Running protoc"

protoc pkg/plugin/proto/*.proto --go_out=plugins=grpc:pkg/plugin/generated/ -I pkg/plugin/proto/

echo "Success!"
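
The protoc step assumes both `protoc` and its Go plugin are on the PATH; one way to get the plugin -- my assumption, not stated in the diff:

```bash
go get -u github.com/golang/protobuf/protoc-gen-go
```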

31	hack/test.sh
@@ -1,4 +1,4 @@
#!/bin/bash

# Copyright 2016 The Kubernetes Authors.
#
@@ -25,31 +25,4 @@ TARGETS=$(for d in "$@"; do echo ./$d/...; done)
echo "Running tests:"
go test -installsuffix "static" -timeout 60s ${TARGETS}

echo "Success!"

@@ -18,7 +18,9 @@ HACK_DIR=$(dirname "${BASH_SOURCE}")

echo "Updating formatting"

gofmt -w -s $(find . -type f -name "*.go" -not -path "./vendor/*" -not -path "./pkg/generated/*" -not -name "zz_generated*")

command -v goimports > /dev/null || go get golang.org/x/tools/cmd/goimports
goimports -w -d $(find . -type f -name "*.go" -not -path "./vendor/*" -not -path "./pkg/generated/*" -not -name "zz_generated*")

echo "Success!"

@@ -1,60 +0,0 @@
#!/bin/bash -e
#
# Copyright 2017 Heptio Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ARK_ROOT=$(realpath $(dirname ${BASH_SOURCE})/..)
BIN=${ARK_ROOT}/_output/bin
mkdir -p ${BIN}
go build -o ${BIN}/client-gen ./vendor/k8s.io/kubernetes/cmd/libs/go2idl/client-gen

OUTPUT_BASE=""
if [[ -z "${GOPATH}" ]]; then
  OUTPUT_BASE="${HOME}/go/src"
else
  OUTPUT_BASE="${GOPATH}/src"
fi

verify=""
for i in "$@"; do
  if [[ $i == "--verify-only" ]]; then
    verify=1
    break
  fi
done

if [[ -z ${verify} ]]; then
  echo "Updating generated clientsets"

  find ${ARK_ROOT}/pkg/generated/clientset \
    \( \
      -name '*.go' -and \
      \( \
        ! -name '*_expansion.go' \
        -or \
        -name generated_expansion.go \
      \) \
    \) -exec rm {} \;
fi

${BIN}/client-gen \
  --go-header-file /dev/null \
  --output-base ${OUTPUT_BASE} \
  --input-base github.com/heptio/ark/pkg/apis \
  --clientset-path github.com/heptio/ark/pkg/generated \
  --input ark/v1 \
  --clientset-name clientset \
  $@

echo "Success!"
@@ -1,6 +1,6 @@
#!/bin/bash -e
#
# Copyright 2017 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,13 +15,12 @@
# limitations under the License.

HACK_DIR=$(dirname "${BASH_SOURCE}")
REPO_ROOT=${HACK_DIR}/..

${REPO_ROOT}/vendor/k8s.io/code-generator/generate-groups.sh \
  all \
  github.com/heptio/ark/pkg/generated \
  github.com/heptio/ark/pkg/apis \
  ark:v1 \
  --go-header-file hack/boilerplate.go.txt \
  $@
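
In practice this script becomes the single entry point for the generated CRD code; the verify variant (used by the verify script changed further below) passes `--verify-only` straight through via `$@`. A usage sketch:

```bash
./hack/update-generated-crd-code.sh                # regenerate clientset/informers/listers/deepcopy
./hack/update-generated-crd-code.sh --verify-only  # check without writing
```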

@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

ARK_ROOT=$(dirname ${BASH_SOURCE})/..
BIN=${ARK_ROOT}/_output/bin
mkdir -p ${BIN}

@@ -1,55 +0,0 @@
#!/bin/bash -e
#
# Copyright 2017 Heptio Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ARK_ROOT=$(realpath $(dirname ${BASH_SOURCE})/..)
BIN=${ARK_ROOT}/_output/bin
mkdir -p ${BIN}

echo "Updating generated informers"

go build -o ${BIN}/informer-gen ./vendor/k8s.io/kubernetes/cmd/libs/go2idl/informer-gen

OUTPUT_BASE=""
if [[ -z "${GOPATH}" ]]; then
  OUTPUT_BASE="${HOME}/go/src"
else
  OUTPUT_BASE="${GOPATH}/src"
fi

verify=""
for i in "$@"; do
  if [[ $i == "--verify-only" ]]; then
    verify=1
    break
  fi
done

if [[ -z ${verify} ]]; then
  rm -rf ${ARK_ROOT}/pkg/generated/informers
fi

${BIN}/informer-gen \
  --logtostderr \
  --go-header-file /dev/null \
  --output-base ${OUTPUT_BASE} \
  --input-dirs github.com/heptio/ark/pkg/apis/ark/v1 \
  --output-package github.com/heptio/ark/pkg/generated/informers \
  --listers-package github.com/heptio/ark/pkg/generated/listers \
  --internal-clientset-package github.com/heptio/ark/pkg/generated/clientset \
  --versioned-clientset-package github.com/heptio/ark/pkg/generated/clientset \
  $@

echo "Success!"
@@ -1,60 +0,0 @@
#!/bin/bash -e
#
# Copyright 2017 Heptio Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

ARK_ROOT=$(realpath $(dirname ${BASH_SOURCE})/..)
BIN=${ARK_ROOT}/_output/bin
mkdir -p ${BIN}

echo "Updating generated listers"

go build -o ${BIN}/lister-gen ./vendor/k8s.io/kubernetes/cmd/libs/go2idl/lister-gen

OUTPUT_BASE=""
if [[ -z "${GOPATH}" ]]; then
  OUTPUT_BASE="${HOME}/go/src"
else
  OUTPUT_BASE="${GOPATH}/src"
fi

verify=""
for i in "$@"; do
  if [[ $i == "--verify-only" ]]; then
    verify=1
    break
  fi
done

if [[ -z ${verify} ]]; then
  find ${ARK_ROOT}/pkg/generated/listers \
    \( \
      -name '*.go' -and \
      \( \
        ! -name '*_expansion.go' \
        -or \
        -name generated_expansion.go \
      \) \
    \) -exec rm {} \;
fi

${BIN}/lister-gen \
  --logtostderr \
  --go-header-file /dev/null \
  --output-base ${OUTPUT_BASE} \
  --input-dirs github.com/heptio/ark/pkg/apis/ark/v1 \
  --output-package github.com/heptio/ark/pkg/generated/listers \
  $@

echo "Success!"
32	hack/verify-fmt.sh	Executable file
@@ -0,0 +1,32 @@
#!/bin/bash -e
#
# Copyright 2017 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

HACK_DIR=$(dirname "${BASH_SOURCE}")

echo "Verifying gofmt"
files=$(gofmt -l -s $(find . -type f -name "*.go" -not -path "./vendor/*" -not -path "./pkg/generated/*" -not -name "zz_generated*"))
if [[ -n "${files}" ]]; then
  echo "The following files need gofmt updating - please run 'make update'"
  echo "${files}"
  exit 1
fi
echo "Success!"

echo "Verifying goimports"
command -v goimports > /dev/null || go get golang.org/x/tools/cmd/goimports
goimports -l $(find . -type f -name "*.go" -not -path "./vendor/*" -not -path "./pkg/generated/*" -not -name "zz_generated*")
echo "Success!"

@@ -1,6 +1,6 @@
#!/bin/bash -e
#
# Copyright 2017 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,12 +16,4 @@

HACK_DIR=$(dirname "${BASH_SOURCE}")

${HACK_DIR}/update-generated-crd-code.sh --verify-only

@@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

ARK_ROOT=$(dirname ${BASH_SOURCE})/..
HACK_DIR=$(dirname "${BASH_SOURCE}")
DOCS_DIR=${ARK_ROOT}/docs/cli-reference
TMP_DIR="$(mktemp -d)"

@@ -184,7 +184,8 @@ type VolumeBackupInfo struct {
    Iops *int64 `json:"iops,omitempty"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Backup is an Ark resource that represents the capture of Kubernetes
// cluster state at a point in time (API objects and associated volume state).
@@ -196,6 +197,8 @@ type Backup struct {
    Status BackupStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// BackupList is a list of Backups.
type BackupList struct {
    metav1.TypeMeta `json:",inline"`

@@ -18,6 +18,8 @@ package v1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ConfigList is a list of Configs.
type ConfigList struct {
    metav1.TypeMeta `json:",inline"`
@@ -26,7 +28,8 @@ type ConfigList struct {
    Items []Config `json:"items"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Config is an Ark resource that captures configuration information to be
// used for running the Ark server.
@@ -66,17 +69,11 @@ type Config struct {
}

// CloudProviderConfig is configuration information about how to connect
// to a particular cloud.
type CloudProviderConfig struct {
    Name string `json:"name"`

    Config map[string]string `json:"config"`
}

// ObjectStorageProviderConfig is configuration information for connecting to
@@ -90,23 +87,3 @@ type ObjectStorageProviderConfig struct {
    // are stored.
    Bucket string `json:"bucket"`
}

@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package

// Package v1 is the v1 version of the API.
// +groupName=ark.heptio.com
package v1

@@ -31,6 +31,7 @@ const (
    DownloadTargetKindBackupLog      DownloadTargetKind = "BackupLog"
    DownloadTargetKindBackupContents DownloadTargetKind = "BackupContents"
    DownloadTargetKindRestoreLog     DownloadTargetKind = "RestoreLog"
    DownloadTargetKindRestoreResults DownloadTargetKind = "RestoreResults"
)

// DownloadTarget is the specification for what kind of file to download, and the name of the
@@ -64,7 +65,8 @@ type DownloadRequestStatus struct {
    Expiration metav1.Time `json:"expiration"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// DownloadRequest is a request to download an artifact from backup object storage, such as a backup
// log file.
@@ -76,6 +78,8 @@ type DownloadRequest struct {
    Status DownloadRequestStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// DownloadRequestList is a list of DownloadRequests.
type DownloadRequestList struct {
    metav1.TypeMeta `json:",inline"`

@@ -91,13 +91,13 @@ type RestoreStatus struct {
    // applicable)
    ValidationErrors []string `json:"validationErrors"`

    // Warnings is a count of all warning messages that were generated during
    // execution of the restore. The actual warnings are stored in object storage.
    Warnings int `json:"warnings"`

    // Errors is a count of all error messages that were generated during
    // execution of the restore. The actual errors are stored in object storage.
    Errors int `json:"errors"`
}
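
With warnings and errors now plain counts, simple status reads become one-liners. A sketch, assuming the Restore resource is registered with the cluster as in the examples above:

```bash
kubectl --namespace heptio-ark get restore <RESTORE-NAME> -o jsonpath='{.status.warnings}'
```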

// RestoreResult is a collection of messages that were generated
@@ -118,7 +118,8 @@ type RestoreResult struct {
    Namespaces map[string][]string `json:"namespaces"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Restore is an Ark resource that represents the application of
// resources from an Ark backup to a target Kubernetes cluster.
@@ -130,6 +131,8 @@ type Restore struct {
    Status RestoreStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// RestoreList is a list of Restores.
type RestoreList struct {
    metav1.TypeMeta `json:",inline"`

@@ -61,7 +61,8 @@ type ScheduleStatus struct {
    ValidationErrors []string `json:"validationErrors"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Schedule is an Ark resource that represents a pre-scheduled or
// periodic Backup that should be run.
@@ -73,6 +74,8 @@ type Schedule struct {
    Status ScheduleStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ScheduleList is a list of Schedules.
type ScheduleList struct {
    metav1.TypeMeta `json:",inline"`

979	pkg/apis/ark/v1/zz_generated.deepcopy.go	Normal file
@@ -0,0 +1,979 @@
// +build !ignore_autogenerated

/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file was autogenerated by deepcopy-gen. Do not edit it manually!

package v1

import (
    meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    conversion "k8s.io/apimachinery/pkg/conversion"
    runtime "k8s.io/apimachinery/pkg/runtime"
    reflect "reflect"
)

// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them.
//
// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented.
func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc {
    return []conversion.GeneratedDeepCopyFunc{
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*Backup).DeepCopyInto(out.(*Backup))
            return nil
        }, InType: reflect.TypeOf(&Backup{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*BackupHooks).DeepCopyInto(out.(*BackupHooks))
            return nil
        }, InType: reflect.TypeOf(&BackupHooks{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*BackupList).DeepCopyInto(out.(*BackupList))
            return nil
        }, InType: reflect.TypeOf(&BackupList{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*BackupResourceHook).DeepCopyInto(out.(*BackupResourceHook))
            return nil
        }, InType: reflect.TypeOf(&BackupResourceHook{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*BackupResourceHookSpec).DeepCopyInto(out.(*BackupResourceHookSpec))
            return nil
        }, InType: reflect.TypeOf(&BackupResourceHookSpec{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*BackupSpec).DeepCopyInto(out.(*BackupSpec))
            return nil
        }, InType: reflect.TypeOf(&BackupSpec{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*BackupStatus).DeepCopyInto(out.(*BackupStatus))
            return nil
        }, InType: reflect.TypeOf(&BackupStatus{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*CloudProviderConfig).DeepCopyInto(out.(*CloudProviderConfig))
            return nil
        }, InType: reflect.TypeOf(&CloudProviderConfig{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*Config).DeepCopyInto(out.(*Config))
            return nil
        }, InType: reflect.TypeOf(&Config{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*ConfigList).DeepCopyInto(out.(*ConfigList))
            return nil
        }, InType: reflect.TypeOf(&ConfigList{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*DownloadRequest).DeepCopyInto(out.(*DownloadRequest))
            return nil
        }, InType: reflect.TypeOf(&DownloadRequest{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*DownloadRequestList).DeepCopyInto(out.(*DownloadRequestList))
            return nil
        }, InType: reflect.TypeOf(&DownloadRequestList{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*DownloadRequestSpec).DeepCopyInto(out.(*DownloadRequestSpec))
            return nil
        }, InType: reflect.TypeOf(&DownloadRequestSpec{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*DownloadRequestStatus).DeepCopyInto(out.(*DownloadRequestStatus))
            return nil
        }, InType: reflect.TypeOf(&DownloadRequestStatus{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*DownloadTarget).DeepCopyInto(out.(*DownloadTarget))
            return nil
        }, InType: reflect.TypeOf(&DownloadTarget{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*ExecHook).DeepCopyInto(out.(*ExecHook))
            return nil
        }, InType: reflect.TypeOf(&ExecHook{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*ObjectStorageProviderConfig).DeepCopyInto(out.(*ObjectStorageProviderConfig))
            return nil
        }, InType: reflect.TypeOf(&ObjectStorageProviderConfig{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*Restore).DeepCopyInto(out.(*Restore))
            return nil
        }, InType: reflect.TypeOf(&Restore{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*RestoreList).DeepCopyInto(out.(*RestoreList))
            return nil
        }, InType: reflect.TypeOf(&RestoreList{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*RestoreResult).DeepCopyInto(out.(*RestoreResult))
            return nil
        }, InType: reflect.TypeOf(&RestoreResult{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*RestoreSpec).DeepCopyInto(out.(*RestoreSpec))
            return nil
        }, InType: reflect.TypeOf(&RestoreSpec{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*RestoreStatus).DeepCopyInto(out.(*RestoreStatus))
            return nil
        }, InType: reflect.TypeOf(&RestoreStatus{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*Schedule).DeepCopyInto(out.(*Schedule))
            return nil
        }, InType: reflect.TypeOf(&Schedule{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*ScheduleList).DeepCopyInto(out.(*ScheduleList))
            return nil
        }, InType: reflect.TypeOf(&ScheduleList{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*ScheduleSpec).DeepCopyInto(out.(*ScheduleSpec))
            return nil
        }, InType: reflect.TypeOf(&ScheduleSpec{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*ScheduleStatus).DeepCopyInto(out.(*ScheduleStatus))
            return nil
        }, InType: reflect.TypeOf(&ScheduleStatus{})},
        {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
            in.(*VolumeBackupInfo).DeepCopyInto(out.(*VolumeBackupInfo))
            return nil
        }, InType: reflect.TypeOf(&VolumeBackupInfo{})},
    }
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Backup) DeepCopyInto(out *Backup) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Spec.DeepCopyInto(&out.Spec)
    in.Status.DeepCopyInto(&out.Status)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backup.
func (in *Backup) DeepCopy() *Backup {
    if in == nil {
        return nil
    }
    out := new(Backup)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Backup) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    } else {
        return nil
    }
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupHooks) DeepCopyInto(out *BackupHooks) {
    *out = *in
    if in.Resources != nil {
        in, out := &in.Resources, &out.Resources
        *out = make([]BackupResourceHookSpec, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupHooks.
func (in *BackupHooks) DeepCopy() *BackupHooks {
    if in == nil {
        return nil
    }
    out := new(BackupHooks)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupList) DeepCopyInto(out *BackupList) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    out.ListMeta = in.ListMeta
    if in.Items != nil {
        in, out := &in.Items, &out.Items
        *out = make([]Backup, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupList.
func (in *BackupList) DeepCopy() *BackupList {
    if in == nil {
        return nil
    }
    out := new(BackupList)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BackupList) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    } else {
        return nil
    }
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupResourceHook) DeepCopyInto(out *BackupResourceHook) {
    *out = *in
    if in.Exec != nil {
        in, out := &in.Exec, &out.Exec
        if *in == nil {
            *out = nil
        } else {
            *out = new(ExecHook)
            (*in).DeepCopyInto(*out)
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupResourceHook.
func (in *BackupResourceHook) DeepCopy() *BackupResourceHook {
    if in == nil {
        return nil
    }
    out := new(BackupResourceHook)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupResourceHookSpec) DeepCopyInto(out *BackupResourceHookSpec) {
    *out = *in
    if in.IncludedNamespaces != nil {
        in, out := &in.IncludedNamespaces, &out.IncludedNamespaces
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.ExcludedNamespaces != nil {
        in, out := &in.ExcludedNamespaces, &out.ExcludedNamespaces
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.IncludedResources != nil {
        in, out := &in.IncludedResources, &out.IncludedResources
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.ExcludedResources != nil {
        in, out := &in.ExcludedResources, &out.ExcludedResources
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.LabelSelector != nil {
        in, out := &in.LabelSelector, &out.LabelSelector
        if *in == nil {
            *out = nil
        } else {
            *out = new(meta_v1.LabelSelector)
            (*in).DeepCopyInto(*out)
        }
    }
    if in.Hooks != nil {
        in, out := &in.Hooks, &out.Hooks
        *out = make([]BackupResourceHook, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupResourceHookSpec.
func (in *BackupResourceHookSpec) DeepCopy() *BackupResourceHookSpec {
    if in == nil {
        return nil
    }
    out := new(BackupResourceHookSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
    *out = *in
    if in.IncludedNamespaces != nil {
        in, out := &in.IncludedNamespaces, &out.IncludedNamespaces
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.ExcludedNamespaces != nil {
        in, out := &in.ExcludedNamespaces, &out.ExcludedNamespaces
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.IncludedResources != nil {
        in, out := &in.IncludedResources, &out.IncludedResources
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.ExcludedResources != nil {
        in, out := &in.ExcludedResources, &out.ExcludedResources
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.LabelSelector != nil {
        in, out := &in.LabelSelector, &out.LabelSelector
        if *in == nil {
            *out = nil
        } else {
            *out = new(meta_v1.LabelSelector)
            (*in).DeepCopyInto(*out)
        }
    }
    if in.SnapshotVolumes != nil {
        in, out := &in.SnapshotVolumes, &out.SnapshotVolumes
        if *in == nil {
            *out = nil
        } else {
            *out = new(bool)
            **out = **in
        }
    }
    out.TTL = in.TTL
    if in.IncludeClusterResources != nil {
        in, out := &in.IncludeClusterResources, &out.IncludeClusterResources
        if *in == nil {
            *out = nil
        } else {
            *out = new(bool)
            **out = **in
        }
    }
    in.Hooks.DeepCopyInto(&out.Hooks)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec.
func (in *BackupSpec) DeepCopy() *BackupSpec {
    if in == nil {
        return nil
    }
    out := new(BackupSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupStatus) DeepCopyInto(out *BackupStatus) {
    *out = *in
    in.Expiration.DeepCopyInto(&out.Expiration)
    if in.VolumeBackups != nil {
        in, out := &in.VolumeBackups, &out.VolumeBackups
        *out = make(map[string]*VolumeBackupInfo, len(*in))
        for key, val := range *in {
            if val == nil {
                (*out)[key] = nil
            } else {
                (*out)[key] = new(VolumeBackupInfo)
                val.DeepCopyInto((*out)[key])
            }
        }
    }
    if in.ValidationErrors != nil {
        in, out := &in.ValidationErrors, &out.ValidationErrors
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus.
func (in *BackupStatus) DeepCopy() *BackupStatus {
    if in == nil {
        return nil
    }
    out := new(BackupStatus)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudProviderConfig) DeepCopyInto(out *CloudProviderConfig) {
    *out = *in
    if in.Config != nil {
        in, out := &in.Config, &out.Config
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudProviderConfig.
func (in *CloudProviderConfig) DeepCopy() *CloudProviderConfig {
    if in == nil {
        return nil
    }
    out := new(CloudProviderConfig)
    in.DeepCopyInto(out)
    return out
}
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Config) DeepCopyInto(out *Config) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
if in.PersistentVolumeProvider != nil {
|
||||
in, out := &in.PersistentVolumeProvider, &out.PersistentVolumeProvider
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(CloudProviderConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
in.BackupStorageProvider.DeepCopyInto(&out.BackupStorageProvider)
|
||||
out.BackupSyncPeriod = in.BackupSyncPeriod
|
||||
out.GCSyncPeriod = in.GCSyncPeriod
|
||||
out.ScheduleSyncPeriod = in.ScheduleSyncPeriod
|
||||
if in.ResourcePriorities != nil {
|
||||
in, out := &in.ResourcePriorities, &out.ResourcePriorities
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
|
||||
func (in *Config) DeepCopy() *Config {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Config)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *Config) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ConfigList) DeepCopyInto(out *ConfigList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Config, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList.
|
||||
func (in *ConfigList) DeepCopy() *ConfigList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ConfigList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ConfigList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DownloadRequest) DeepCopyInto(out *DownloadRequest) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
out.Spec = in.Spec
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequest.
|
||||
func (in *DownloadRequest) DeepCopy() *DownloadRequest {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DownloadRequest)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DownloadRequest) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DownloadRequestList) DeepCopyInto(out *DownloadRequestList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]DownloadRequest, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequestList.
|
||||
func (in *DownloadRequestList) DeepCopy() *DownloadRequestList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DownloadRequestList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *DownloadRequestList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DownloadRequestSpec) DeepCopyInto(out *DownloadRequestSpec) {
|
||||
*out = *in
|
||||
out.Target = in.Target
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequestSpec.
|
||||
func (in *DownloadRequestSpec) DeepCopy() *DownloadRequestSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DownloadRequestSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DownloadRequestStatus) DeepCopyInto(out *DownloadRequestStatus) {
|
||||
*out = *in
|
||||
in.Expiration.DeepCopyInto(&out.Expiration)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequestStatus.
|
||||
func (in *DownloadRequestStatus) DeepCopy() *DownloadRequestStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DownloadRequestStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DownloadTarget) DeepCopyInto(out *DownloadTarget) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadTarget.
|
||||
func (in *DownloadTarget) DeepCopy() *DownloadTarget {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DownloadTarget)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ExecHook) DeepCopyInto(out *ExecHook) {
|
||||
*out = *in
|
||||
if in.Command != nil {
|
||||
in, out := &in.Command, &out.Command
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
out.Timeout = in.Timeout
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecHook.
|
||||
func (in *ExecHook) DeepCopy() *ExecHook {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ExecHook)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ObjectStorageProviderConfig) DeepCopyInto(out *ObjectStorageProviderConfig) {
|
||||
*out = *in
|
||||
in.CloudProviderConfig.DeepCopyInto(&out.CloudProviderConfig)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageProviderConfig.
|
||||
func (in *ObjectStorageProviderConfig) DeepCopy() *ObjectStorageProviderConfig {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ObjectStorageProviderConfig)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Restore) DeepCopyInto(out *Restore) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Restore.
|
||||
func (in *Restore) DeepCopy() *Restore {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Restore)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *Restore) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RestoreList) DeepCopyInto(out *RestoreList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Restore, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreList.
|
||||
func (in *RestoreList) DeepCopy() *RestoreList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RestoreList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *RestoreList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RestoreResult) DeepCopyInto(out *RestoreResult) {
|
||||
*out = *in
|
||||
if in.Ark != nil {
|
||||
in, out := &in.Ark, &out.Ark
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Cluster != nil {
|
||||
in, out := &in.Cluster, &out.Cluster
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Namespaces != nil {
|
||||
in, out := &in.Namespaces, &out.Namespaces
|
||||
*out = make(map[string][]string, len(*in))
|
||||
for key, val := range *in {
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
(*out)[key] = make([]string, len(val))
|
||||
copy((*out)[key], val)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreResult.
|
||||
func (in *RestoreResult) DeepCopy() *RestoreResult {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RestoreResult)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RestoreSpec) DeepCopyInto(out *RestoreSpec) {
|
||||
*out = *in
|
||||
if in.IncludedNamespaces != nil {
|
||||
in, out := &in.IncludedNamespaces, &out.IncludedNamespaces
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.ExcludedNamespaces != nil {
|
||||
in, out := &in.ExcludedNamespaces, &out.ExcludedNamespaces
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.IncludedResources != nil {
|
||||
in, out := &in.IncludedResources, &out.IncludedResources
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.ExcludedResources != nil {
|
||||
in, out := &in.ExcludedResources, &out.ExcludedResources
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.NamespaceMapping != nil {
|
||||
in, out := &in.NamespaceMapping, &out.NamespaceMapping
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.LabelSelector != nil {
|
||||
in, out := &in.LabelSelector, &out.LabelSelector
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(meta_v1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
if in.RestorePVs != nil {
|
||||
in, out := &in.RestorePVs, &out.RestorePVs
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
if in.IncludeClusterResources != nil {
|
||||
in, out := &in.IncludeClusterResources, &out.IncludeClusterResources
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSpec.
|
||||
func (in *RestoreSpec) DeepCopy() *RestoreSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RestoreSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RestoreStatus) DeepCopyInto(out *RestoreStatus) {
|
||||
*out = *in
|
||||
if in.ValidationErrors != nil {
|
||||
in, out := &in.ValidationErrors, &out.ValidationErrors
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreStatus.
|
||||
func (in *RestoreStatus) DeepCopy() *RestoreStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RestoreStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Schedule) DeepCopyInto(out *Schedule) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Schedule.
|
||||
func (in *Schedule) DeepCopy() *Schedule {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Schedule)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *Schedule) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScheduleList) DeepCopyInto(out *ScheduleList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Schedule, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleList.
|
||||
func (in *ScheduleList) DeepCopy() *ScheduleList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScheduleList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ScheduleList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScheduleSpec) DeepCopyInto(out *ScheduleSpec) {
|
||||
*out = *in
|
||||
in.Template.DeepCopyInto(&out.Template)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleSpec.
|
||||
func (in *ScheduleSpec) DeepCopy() *ScheduleSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScheduleSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ScheduleStatus) DeepCopyInto(out *ScheduleStatus) {
|
||||
*out = *in
|
||||
in.LastBackup.DeepCopyInto(&out.LastBackup)
|
||||
if in.ValidationErrors != nil {
|
||||
in, out := &in.ValidationErrors, &out.ValidationErrors
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleStatus.
|
||||
func (in *ScheduleStatus) DeepCopy() *ScheduleStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ScheduleStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *VolumeBackupInfo) DeepCopyInto(out *VolumeBackupInfo) {
|
||||
*out = *in
|
||||
if in.Iops != nil {
|
||||
in, out := &in.Iops, &out.Iops
|
||||
if *in == nil {
|
||||
*out = nil
|
||||
} else {
|
||||
*out = new(int64)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeBackupInfo.
|
||||
func (in *VolumeBackupInfo) DeepCopy() *VolumeBackupInfo {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(VolumeBackupInfo)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
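The DeepCopyInto/DeepCopy/DeepCopyObject trio above follows the standard k8s.io deepcopy-gen contract: DeepCopyInto writes into an existing value, DeepCopy allocates and returns a new one, and DeepCopyObject satisfies runtime.Object. A minimal usage sketch (not part of this diff; the package and helper names are illustrative):

package arkexample

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	v1 "github.com/heptio/ark/pkg/apis/ark/v1"
)

// copyAndExtendTTL deep-copies a Backup before mutating it, as required for
// objects read from a shared informer cache.
func copyAndExtendTTL(original *v1.Backup) (*v1.Backup, runtime.Object) {
	updated := original.DeepCopy() // independent copy; safe to mutate
	updated.Spec.TTL = metav1.Duration{Duration: 48 * time.Hour}
	return updated, original.DeepCopyObject() // generic copy via runtime.Object
}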
@@ -26,47 +26,33 @@ import (
	"github.com/sirupsen/logrus"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"
	kuberrs "k8s.io/apimachinery/pkg/util/errors"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/cloudprovider"
	"github.com/heptio/ark/pkg/discovery"
	"github.com/heptio/ark/pkg/util/collections"
	kubeutil "github.com/heptio/ark/pkg/util/kube"
	"github.com/heptio/ark/pkg/util/logging"
)

// Backupper performs backups.
type Backupper interface {
	// Backup takes a backup using the specification in the api.Backup and writes backup and log data
	// to the given writers.
	Backup(backup *api.Backup, backupFile, logFile io.Writer) error
	Backup(backup *api.Backup, backupFile, logFile io.Writer, actions []ItemAction) error
}

// kubernetesBackupper implements Backupper.
type kubernetesBackupper struct {
	dynamicFactory     client.DynamicFactory
	discoveryHelper    discovery.Helper
	actions            map[schema.GroupResource]Action
	podCommandExecutor podCommandExecutor

	dynamicFactory        client.DynamicFactory
	discoveryHelper       discovery.Helper
	podCommandExecutor    podCommandExecutor
	groupBackupperFactory groupBackupperFactory
}

// ResourceIdentifier describes a single item by its group, resource, namespace, and name.
type ResourceIdentifier struct {
	schema.GroupResource
	Namespace string
	Name      string
}

// Action is an actor that performs an operation on an individual item being backed up.
type Action interface {
	// Execute allows the Action to perform arbitrary logic with the item being backed up and the
	// backup itself. Implementations may return additional ResourceIdentifiers that indicate specific
	// items that also need to be backed up.
	Execute(log *logrus.Entry, item runtime.Unstructured, backup *api.Backup) ([]ResourceIdentifier, error)
	snapshotService cloudprovider.SnapshotService
}

type itemKey struct {
@@ -75,6 +61,14 @@ type itemKey struct {
	name string
}

type resolvedAction struct {
	ItemAction

	resourceIncludesExcludes  *collections.IncludesExcludes
	namespaceIncludesExcludes *collections.IncludesExcludes
	selector                  labels.Selector
}

func (i *itemKey) String() string {
	return fmt.Sprintf("resource=%s,namespace=%s,name=%s", i.resource, i.namespace, i.name)
}
@@ -83,38 +77,48 @@ func (i *itemKey) String() string {
func NewKubernetesBackupper(
	discoveryHelper discovery.Helper,
	dynamicFactory client.DynamicFactory,
	actions map[string]Action,
	podCommandExecutor podCommandExecutor,
	snapshotService cloudprovider.SnapshotService,
) (Backupper, error) {
	resolvedActions, err := resolveActions(discoveryHelper, actions)
	if err != nil {
		return nil, err
	}

	return &kubernetesBackupper{
		discoveryHelper:    discoveryHelper,
		dynamicFactory:     dynamicFactory,
		actions:            resolvedActions,
		podCommandExecutor: podCommandExecutor,

		discoveryHelper:       discoveryHelper,
		dynamicFactory:        dynamicFactory,
		podCommandExecutor:    podCommandExecutor,
		groupBackupperFactory: &defaultGroupBackupperFactory{},
		snapshotService:       snapshotService,
	}, nil
}

// resolveActions resolves the string-based map of group-resources to actions and returns a map of
// schema.GroupResources to actions.
func resolveActions(helper discovery.Helper, actions map[string]Action) (map[schema.GroupResource]Action, error) {
	ret := make(map[schema.GroupResource]Action)
func resolveActions(actions []ItemAction, helper discovery.Helper) ([]resolvedAction, error) {
	var resolved []resolvedAction

	for resource, action := range actions {
		gvr, _, err := helper.ResourceFor(schema.ParseGroupResource(resource).WithVersion(""))
	for _, action := range actions {
		resourceSelector, err := action.AppliesTo()
		if err != nil {
			return nil, err
		}
		ret[gvr.GroupResource()] = action

		resources := getResourceIncludesExcludes(helper, resourceSelector.IncludedResources, resourceSelector.ExcludedResources)
		namespaces := collections.NewIncludesExcludes().Includes(resourceSelector.IncludedNamespaces...).Excludes(resourceSelector.ExcludedNamespaces...)

		selector := labels.Everything()
		if resourceSelector.LabelSelector != "" {
			if selector, err = labels.Parse(resourceSelector.LabelSelector); err != nil {
				return nil, err
			}
		}

		res := resolvedAction{
			ItemAction:                action,
			resourceIncludesExcludes:  resources,
			namespaceIncludesExcludes: namespaces,
			selector:                  selector,
		}

		resolved = append(resolved, res)
	}

	return ret, nil
	return resolved, nil
}

// getResourceIncludesExcludes takes the lists of resources to include and exclude, uses the
@@ -171,7 +175,7 @@ func getResourceHooks(hookSpecs []api.BackupResourceHookSpec, discoveryHelper di

// Backup backs up the items specified in the Backup, placing them in a gzip-compressed tar file
// written to backupFile. The finalized api.Backup is written to metadata.
func (kb *kubernetesBackupper) Backup(backup *api.Backup, backupFile, logFile io.Writer) error {
func (kb *kubernetesBackupper) Backup(backup *api.Backup, backupFile, logFile io.Writer, actions []ItemAction) error {
	gzippedData := gzip.NewWriter(backupFile)
	defer gzippedData.Close()

@@ -183,6 +187,8 @@ func (kb *kubernetesBackupper) Backup(backup *api.Backup, backupFile, logFile io

	logger := logrus.New()
	logger.Out = gzippedLog
	logger.Hooks.Add(&logging.ErrorLocationHook{})
	logger.Hooks.Add(&logging.LogLocationHook{})
	log := logger.WithField("backup", kubeutil.NamespaceAndName(backup))
	log.Info("Starting backup")

@@ -212,6 +218,11 @@ func (kb *kubernetesBackupper) Backup(backup *api.Backup, backupFile, logFile io
		"networkpolicies": newCohabitatingResource("networkpolicies", "extensions", "networking.k8s.io"),
	}

	resolvedActions, err := resolveActions(actions, kb.discoveryHelper)
	if err != nil {
		return err
	}

	gb := kb.groupBackupperFactory.newGroupBackupper(
		log,
		backup,
@@ -222,10 +233,11 @@ func (kb *kubernetesBackupper) Backup(backup *api.Backup, backupFile, logFile io
		kb.discoveryHelper,
		backedUpItems,
		cohabitatingResources,
		kb.actions,
		resolvedActions,
		kb.podCommandExecutor,
		tw,
		resourceHooks,
		kb.snapshotService,
	)

	for _, group := range kb.discoveryHelper.Resources() {

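Taken together, these hunks move action resolution from construction time (NewKubernetesBackupper) to call time (Backup), so each backup run can carry its own set of item actions. A hedged sketch of the new call shape (the function and buffer names are illustrative, not from this commit):

package arkexample

import (
	"bytes"

	v1 "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/backup"
)

// runBackup drives the post-refactor Backupper signature: actions are now
// passed per invocation instead of being baked into the backupper.
func runBackup(b backup.Backupper, spec *v1.Backup, actions []backup.ItemAction) error {
	var backupFile, logFile bytes.Buffer // real callers stream to object storage
	return b.Backup(spec, &backupFile, &logFile, actions)
}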
@@ -30,25 +30,33 @@ import (
// backupPVAction inspects a PersistentVolumeClaim for the PersistentVolume
// that it references and backs it up
type backupPVAction struct {
	log logrus.FieldLogger
}

func NewBackupPVAction() Action {
	return &backupPVAction{}
func NewBackupPVAction(log logrus.FieldLogger) ItemAction {
	return &backupPVAction{log: log}
}

var pvGroupResource = schema.GroupResource{Group: "", Resource: "persistentvolumes"}

func (a *backupPVAction) AppliesTo() (ResourceSelector, error) {
	return ResourceSelector{
		IncludedResources: []string{"persistentvolumeclaims"},
	}, nil
}

// Execute finds the PersistentVolume referenced by the provided
// PersistentVolumeClaim and backs it up
func (a *backupPVAction) Execute(log *logrus.Entry, item runtime.Unstructured, backup *v1.Backup) ([]ResourceIdentifier, error) {
	log.Info("Executing backupPVAction")
func (a *backupPVAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runtime.Unstructured, []ResourceIdentifier, error) {
	a.log.Info("Executing backupPVAction")

	var additionalItems []ResourceIdentifier

	pvc := item.UnstructuredContent()

	volumeName, err := collections.GetString(pvc, "spec.volumeName")
	if err != nil {
		return additionalItems, errors.WithMessage(err, "unable to get spec.volumeName")
		return nil, nil, errors.WithMessage(err, "unable to get spec.volumeName")
	}

	additionalItems = append(additionalItems, ResourceIdentifier{
@@ -56,5 +64,5 @@ func (a *backupPVAction) Execute(log *logrus.Entry, item runtime.Unstructured, b
		Name: volumeName,
	})

	return additionalItems, nil
	return item, additionalItems, nil
}

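Since NewBackupPVAction now takes a logger and returns the generic ItemAction, built-in actions can be assembled into the slice that Backup accepts. A sketch of that wiring, assuming the exported names above (the helper function itself is hypothetical):

package arkexample

import (
	"github.com/sirupsen/logrus"

	"github.com/heptio/ark/pkg/backup"
)

// defaultBackupActions collects the built-in item actions; the PV action makes
// the PersistentVolume behind each matched PersistentVolumeClaim get backed up too.
func defaultBackupActions(logger logrus.FieldLogger) []backup.ItemAction {
	return []backup.ItemAction{
		backup.NewBackupPVAction(logger),
	}
}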
@@ -35,13 +35,13 @@ func TestBackupPVAction(t *testing.T) {

	backup := &v1.Backup{}

	a := NewBackupPVAction()
	a := NewBackupPVAction(arktest.NewLogger())

	additional, err := a.Execute(arktest.NewLogger(), pvc, backup)
	_, additional, err := a.Execute(pvc, backup)
	assert.EqualError(t, err, "unable to get spec.volumeName: key volumeName not found")

	pvc.Object["spec"].(map[string]interface{})["volumeName"] = "myVolume"
	additional, err = a.Execute(arktest.NewLogger(), pvc, backup)
	_, additional, err = a.Execute(pvc, backup)
	require.NoError(t, err)
	require.Len(t, additional, 1)
	assert.Equal(t, ResourceIdentifier{GroupResource: pvGroupResource, Name: "myVolume"}, additional[0])

@@ -41,6 +41,7 @@ import (

	"github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/cloudprovider"
	"github.com/heptio/ark/pkg/discovery"
	"github.com/heptio/ark/pkg/util/collections"
	kubeutil "github.com/heptio/ark/pkg/util/kube"
@@ -55,49 +56,73 @@ var (
)

type fakeAction struct {
	selector        ResourceSelector
	ids             []string
	backups         []*v1.Backup
	backups         []v1.Backup
	additionalItems []ResourceIdentifier
}

var _ Action = &fakeAction{}
var _ ItemAction = &fakeAction{}

func (a *fakeAction) Execute(log *logrus.Entry, item runtime.Unstructured, backup *v1.Backup) ([]ResourceIdentifier, error) {
func newFakeAction(resource string) *fakeAction {
	return (&fakeAction{}).ForResource(resource)
}

func (a *fakeAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runtime.Unstructured, []ResourceIdentifier, error) {
	metadata, err := meta.Accessor(item)
	if err != nil {
		return a.additionalItems, err
		return item, a.additionalItems, err
	}
	a.ids = append(a.ids, kubeutil.NamespaceAndName(metadata))
	a.backups = append(a.backups, backup)
	a.backups = append(a.backups, *backup)

	return a.additionalItems, nil
	return item, a.additionalItems, nil
}

func (a *fakeAction) AppliesTo() (ResourceSelector, error) {
	return a.selector, nil
}

func (a *fakeAction) ForResource(resource string) *fakeAction {
	a.selector.IncludedResources = []string{resource}
	return a
}

func TestResolveActions(t *testing.T) {
	tests := []struct {
		name                string
		input               map[string]Action
		expected            map[schema.GroupResource]Action
		input               []ItemAction
		expected            []resolvedAction
		resourcesWithErrors []string
		expectError         bool
	}{
		{
			name:     "empty input",
			input:    map[string]Action{},
			expected: map[schema.GroupResource]Action{},
			input:    []ItemAction{},
			expected: nil,
		},
		{
			name:     "mapper error",
			input:    map[string]Action{"badresource": &fakeAction{}},
			expected: map[schema.GroupResource]Action{},
			name:        "resolve error",
			input:       []ItemAction{&fakeAction{selector: ResourceSelector{LabelSelector: "=invalid-selector"}}},
			expected:    nil,
			expectError: true,
		},
		{
			name:  "resolved",
			input: map[string]Action{"foo": &fakeAction{}, "bar": &fakeAction{}},
			expected: map[schema.GroupResource]Action{
				schema.GroupResource{Group: "somegroup", Resource: "foodies"}:      &fakeAction{},
				schema.GroupResource{Group: "anothergroup", Resource: "barnacles"}: &fakeAction{},
			input: []ItemAction{newFakeAction("foo"), newFakeAction("bar")},
			expected: []resolvedAction{
				{
					ItemAction:                newFakeAction("foo"),
					resourceIncludesExcludes:  collections.NewIncludesExcludes().Includes("foodies.somegroup"),
					namespaceIncludesExcludes: collections.NewIncludesExcludes(),
					selector:                  labels.Everything(),
				},
				{
					ItemAction:                newFakeAction("bar"),
					resourceIncludesExcludes:  collections.NewIncludesExcludes().Includes("barnacles.anothergroup"),
					namespaceIncludesExcludes: collections.NewIncludesExcludes(),
					selector:                  labels.Everything(),
				},
			},
		},
	}
@@ -105,14 +130,14 @@ func TestResolveActions(t *testing.T) {
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			resources := map[schema.GroupVersionResource]schema.GroupVersionResource{
				schema.GroupVersionResource{Resource: "foo"}: schema.GroupVersionResource{Group: "somegroup", Resource: "foodies"},
				schema.GroupVersionResource{Resource: "fie"}: schema.GroupVersionResource{Group: "somegroup", Resource: "fields"},
				schema.GroupVersionResource{Resource: "bar"}: schema.GroupVersionResource{Group: "anothergroup", Resource: "barnacles"},
				schema.GroupVersionResource{Resource: "baz"}: schema.GroupVersionResource{Group: "anothergroup", Resource: "bazaars"},
				{Resource: "foo"}: {Group: "somegroup", Resource: "foodies"},
				{Resource: "fie"}: {Group: "somegroup", Resource: "fields"},
				{Resource: "bar"}: {Group: "anothergroup", Resource: "barnacles"},
				{Resource: "baz"}: {Group: "anothergroup", Resource: "bazaars"},
			}
			discoveryHelper := arktest.NewFakeDiscoveryHelper(false, resources)

			actual, err := resolveActions(discoveryHelper, test.input)
			actual, err := resolveActions(test.input, discoveryHelper)
			gotError := err != nil

			if e, a := test.expectError, gotError; e != a {
@@ -174,10 +199,10 @@ func TestGetResourceIncludesExcludes(t *testing.T) {
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			resources := map[schema.GroupVersionResource]schema.GroupVersionResource{
				schema.GroupVersionResource{Resource: "foo"}: schema.GroupVersionResource{Group: "somegroup", Resource: "foodies"},
				schema.GroupVersionResource{Resource: "fie"}: schema.GroupVersionResource{Group: "somegroup", Resource: "fields"},
				schema.GroupVersionResource{Resource: "bar"}: schema.GroupVersionResource{Group: "anothergroup", Resource: "barnacles"},
				schema.GroupVersionResource{Resource: "baz"}: schema.GroupVersionResource{Group: "anothergroup", Resource: "bazaars"},
				{Resource: "foo"}: {Group: "somegroup", Resource: "foodies"},
				{Resource: "fie"}: {Group: "somegroup", Resource: "fields"},
				{Resource: "bar"}: {Group: "anothergroup", Resource: "barnacles"},
				{Resource: "baz"}: {Group: "anothergroup", Resource: "bazaars"},
			}
			discoveryHelper := arktest.NewFakeDiscoveryHelper(false, resources)

@@ -230,7 +255,7 @@ func TestGetNamespaceIncludesExcludes(t *testing.T) {
var (
	v1Group = &metav1.APIResourceList{
		GroupVersion: "v1",
		APIResources: []metav1.APIResource{configMapsResource, podsResource},
		APIResources: []metav1.APIResource{configMapsResource, podsResource, namespacesResource},
	}

	configMapsResource = metav1.APIResource{
@@ -266,6 +291,14 @@ var (
		Verbs:        metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}),
	}

	namespacesResource = metav1.APIResource{
		Name:         "namespaces",
		SingularName: "namespace",
		Namespaced:   false,
		Kind:         "Namespace",
		Verbs:        metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}),
	}

	certificatesGroup = &metav1.APIResourceList{
		GroupVersion: "certificates.k8s.io/v1beta1",
		APIResources: []metav1.APIResource{certificateSigningRequestsResource},
@@ -341,7 +374,6 @@ func TestBackup(t *testing.T) {
	tests := []struct {
		name                  string
		backup                *v1.Backup
		actions               map[string]Action
		expectedNamespaces    *collections.IncludesExcludes
		expectedResources     *collections.IncludesExcludes
		expectedLabelSelector string
@@ -361,7 +393,6 @@ func TestBackup(t *testing.T) {
					ExcludedNamespaces: []string{"c", "d"},
				},
			},
			actions:            map[string]Action{},
			expectedNamespaces: collections.NewIncludesExcludes().Includes("a", "b").Excludes("c", "d"),
			expectedResources:  collections.NewIncludesExcludes().Includes("configmaps", "certificatesigningrequests.certificates.k8s.io", "roles.rbac.authorization.k8s.io"),
			expectedHooks:      []resourceHook{},
@@ -380,7 +411,6 @@ func TestBackup(t *testing.T) {
					},
				},
			},
			actions:            map[string]Action{},
			expectedNamespaces: collections.NewIncludesExcludes(),
			expectedResources:  collections.NewIncludesExcludes(),
			expectedHooks:      []resourceHook{},
@@ -394,7 +424,6 @@ func TestBackup(t *testing.T) {
		{
			name:               "backupGroup errors",
			backup:             &v1.Backup{},
			actions:            map[string]Action{},
			expectedNamespaces: collections.NewIncludesExcludes(),
			expectedResources:  collections.NewIncludesExcludes(),
			expectedHooks:      []resourceHook{},
@@ -432,7 +461,6 @@ func TestBackup(t *testing.T) {
					},
				},
			},
			actions:            map[string]Action{},
			expectedNamespaces: collections.NewIncludesExcludes(),
			expectedResources:  collections.NewIncludesExcludes(),
			expectedHooks: []resourceHook{
@@ -463,9 +491,9 @@ func TestBackup(t *testing.T) {
			discoveryHelper := &arktest.FakeDiscoveryHelper{
				Mapper: &arktest.FakeMapper{
					Resources: map[schema.GroupVersionResource]schema.GroupVersionResource{
						schema.GroupVersionResource{Resource: "cm"}: schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"},
						schema.GroupVersionResource{Resource: "csr"}: schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"},
						schema.GroupVersionResource{Resource: "roles"}: schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "roles"},
						{Resource: "cm"}:    {Group: "", Version: "v1", Resource: "configmaps"},
						{Resource: "csr"}:   {Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"},
						{Resource: "roles"}: {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "roles"},
					},
				},
				ResourceList: []*metav1.APIResourceList{
@@ -483,8 +511,8 @@ func TestBackup(t *testing.T) {
			b, err := NewKubernetesBackupper(
				discoveryHelper,
				dynamicFactory,
				test.actions,
				podCommandExecutor,
				nil,
			)
			require.NoError(t, err)
			kb := b.(*kubernetesBackupper)
@@ -511,10 +539,11 @@ func TestBackup(t *testing.T) {
				discoveryHelper,
				map[itemKey]struct{}{}, // backedUpItems
				cohabitatingResources,
				kb.actions,
				mock.Anything,
				kb.podCommandExecutor,
				mock.Anything, // tarWriter
				test.expectedHooks,
				mock.Anything,
			).Return(groupBackupper)

			for group, err := range test.backupGroupErrors {
@@ -523,7 +552,7 @@ func TestBackup(t *testing.T) {

			var backupFile, logFile bytes.Buffer

			err = b.Backup(test.backup, &backupFile, &logFile)
			err = b.Backup(test.backup, &backupFile, &logFile, nil)
			defer func() {
				// print log if anything failed
				if t.Failed() {
@@ -552,7 +581,7 @@ type mockGroupBackupperFactory struct {
}

func (f *mockGroupBackupperFactory) newGroupBackupper(
	log *logrus.Entry,
	log logrus.FieldLogger,
	backup *v1.Backup,
	namespaces, resources *collections.IncludesExcludes,
	labelSelector string,
@@ -560,10 +589,11 @@ func (f *mockGroupBackupperFactory) newGroupBackupper(
	discoveryHelper discovery.Helper,
	backedUpItems map[itemKey]struct{},
	cohabitatingResources map[string]*cohabitatingResource,
	actions map[schema.GroupResource]Action,
	actions []resolvedAction,
	podCommandExecutor podCommandExecutor,
	tarWriter tarWriter,
	resourceHooks []resourceHook,
	snapshotService cloudprovider.SnapshotService,
) groupBackupper {
	args := f.Called(
		log,
@@ -579,6 +609,7 @@ func (f *mockGroupBackupperFactory) newGroupBackupper(
		podCommandExecutor,
		tarWriter,
		resourceHooks,
		snapshotService,
	)
	return args.Get(0).(groupBackupper)
}

@@ -19,19 +19,21 @@ package backup
import (
	"strings"

	"github.com/sirupsen/logrus"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kuberrs "k8s.io/apimachinery/pkg/util/errors"

	"github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/cloudprovider"
	"github.com/heptio/ark/pkg/discovery"
	"github.com/heptio/ark/pkg/util/collections"
	"github.com/sirupsen/logrus"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	kuberrs "k8s.io/apimachinery/pkg/util/errors"
)

type groupBackupperFactory interface {
	newGroupBackupper(
		log *logrus.Entry,
		log logrus.FieldLogger,
		backup *v1.Backup,
		namespaces, resources *collections.IncludesExcludes,
		labelSelector string,
@@ -39,17 +41,18 @@ type groupBackupperFactory interface {
		discoveryHelper discovery.Helper,
		backedUpItems map[itemKey]struct{},
		cohabitatingResources map[string]*cohabitatingResource,
		actions map[schema.GroupResource]Action,
		actions []resolvedAction,
		podCommandExecutor podCommandExecutor,
		tarWriter tarWriter,
		resourceHooks []resourceHook,
		snapshotService cloudprovider.SnapshotService,
	) groupBackupper
}

type defaultGroupBackupperFactory struct{}

func (f *defaultGroupBackupperFactory) newGroupBackupper(
	log *logrus.Entry,
	log logrus.FieldLogger,
	backup *v1.Backup,
	namespaces, resources *collections.IncludesExcludes,
	labelSelector string,
@@ -57,26 +60,27 @@ func (f *defaultGroupBackupperFactory) newGroupBackupper(
	discoveryHelper discovery.Helper,
	backedUpItems map[itemKey]struct{},
	cohabitatingResources map[string]*cohabitatingResource,
	actions map[schema.GroupResource]Action,
	actions []resolvedAction,
	podCommandExecutor podCommandExecutor,
	tarWriter tarWriter,
	resourceHooks []resourceHook,
	snapshotService cloudprovider.SnapshotService,
) groupBackupper {
	return &defaultGroupBackupper{
		log:                   log,
		backup:                backup,
		namespaces:            namespaces,
		resources:             resources,
		labelSelector:         labelSelector,
		dynamicFactory:        dynamicFactory,
		discoveryHelper:       discoveryHelper,
		backedUpItems:         backedUpItems,
		cohabitatingResources: cohabitatingResources,
		actions:               actions,
		podCommandExecutor:    podCommandExecutor,
		tarWriter:             tarWriter,
		resourceHooks:         resourceHooks,

		log:                      log,
		backup:                   backup,
		namespaces:               namespaces,
		resources:                resources,
		labelSelector:            labelSelector,
		dynamicFactory:           dynamicFactory,
		discoveryHelper:          discoveryHelper,
		backedUpItems:            backedUpItems,
		cohabitatingResources:    cohabitatingResources,
		actions:                  actions,
		podCommandExecutor:       podCommandExecutor,
		tarWriter:                tarWriter,
		resourceHooks:            resourceHooks,
		snapshotService:          snapshotService,
		resourceBackupperFactory: &defaultResourceBackupperFactory{},
	}
}
@@ -86,7 +90,7 @@ type groupBackupper interface {
}

type defaultGroupBackupper struct {
	log                   *logrus.Entry
	log                   logrus.FieldLogger
	backup                *v1.Backup
	namespaces, resources *collections.IncludesExcludes
	labelSelector         string
@@ -94,10 +98,11 @@ type defaultGroupBackupper struct {
	discoveryHelper          discovery.Helper
	backedUpItems            map[itemKey]struct{}
	cohabitatingResources    map[string]*cohabitatingResource
	actions                  map[schema.GroupResource]Action
	actions                  []resolvedAction
	podCommandExecutor       podCommandExecutor
	tarWriter                tarWriter
	resourceHooks            []resourceHook
	snapshotService          cloudprovider.SnapshotService
	resourceBackupperFactory resourceBackupperFactory
}

@@ -121,6 +126,7 @@ func (gb *defaultGroupBackupper) backupGroup(group *metav1.APIResourceList) erro
		gb.podCommandExecutor,
		gb.tarWriter,
		gb.resourceHooks,
		gb.snapshotService,
	)
	)

@@ -21,6 +21,7 @@ import (

	"github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/cloudprovider"
	"github.com/heptio/ark/pkg/discovery"
	"github.com/heptio/ark/pkg/util/collections"
	arktest "github.com/heptio/ark/pkg/util/test"
@@ -45,7 +46,7 @@ func TestBackupGroup(t *testing.T) {
	discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil)

	backedUpItems := map[itemKey]struct{}{
		{resource: "a", namespace: "b", name: "c"}: struct{}{},
		{resource: "a", namespace: "b", name: "c"}: {},
	}

	cohabitatingResources := map[string]*cohabitatingResource{
@@ -56,8 +57,11 @@ func TestBackupGroup(t *testing.T) {
		},
	}

	actions := map[schema.GroupResource]Action{
		schema.GroupResource{Group: "", Resource: "pods"}: &fakeAction{},
	actions := []resolvedAction{
		{
			ItemAction:               newFakeAction("pods"),
			resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("pods"),
		},
	}

	podCommandExecutor := &mockPodCommandExecutor{}
@@ -83,6 +87,7 @@ func TestBackupGroup(t *testing.T) {
		podCommandExecutor,
		tarWriter,
		resourceHooks,
		nil,
	).(*defaultGroupBackupper)

	resourceBackupperFactory := &mockResourceBackupperFactory{}
@@ -106,6 +111,7 @@ func TestBackupGroup(t *testing.T) {
		podCommandExecutor,
		tarWriter,
		resourceHooks,
		nil,
	).Return(resourceBackupper)

	group := &metav1.APIResourceList{
@@ -140,7 +146,7 @@ type mockResourceBackupperFactory struct {
}

func (rbf *mockResourceBackupperFactory) newResourceBackupper(
	log *logrus.Entry,
	log logrus.FieldLogger,
	backup *v1.Backup,
	namespaces *collections.IncludesExcludes,
	resources *collections.IncludesExcludes,
@@ -149,10 +155,11 @@ func (rbf *mockResourceBackupperFactory) newResourceBackupper(
	discoveryHelper discovery.Helper,
	backedUpItems map[itemKey]struct{},
	cohabitatingResources map[string]*cohabitatingResource,
	actions map[schema.GroupResource]Action,
	actions []resolvedAction,
	podCommandExecutor podCommandExecutor,
	tarWriter tarWriter,
	resourceHooks []resourceHook,
	snapshotService cloudprovider.SnapshotService,
) resourceBackupper {
	args := rbf.Called(
		log,
@@ -168,6 +175,7 @@ func (rbf *mockResourceBackupperFactory) newResourceBackupper(
		podCommandExecutor,
		tarWriter,
		resourceHooks,
		snapshotService,
	)
	return args.Get(0).(resourceBackupper)
}

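The mock factories in these tests mirror the production pattern: the backupper depends on a small factory interface rather than constructing its collaborators directly, so tests can substitute a testify mock. A stripped-down sketch of the same injection pattern (generic names, not Ark code):

package arkexample

// worker is the collaborator a test wants to intercept.
type worker interface{ run() error }

// workerFactory is the seam: production code holds the interface,
// tests assign a mock implementation.
type workerFactory interface{ newWorker(name string) worker }

type defaultWorker struct{ name string }

func (w *defaultWorker) run() error { return nil }

type defaultWorkerFactory struct{}

func (defaultWorkerFactory) newWorker(name string) worker { return &defaultWorker{name: name} }

// service uses whatever factory it was given, defaulting to the real one in production.
type service struct {
	factory workerFactory
}

func (s *service) process(name string) error {
	return s.factory.newWorker(name).run()
}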
74	pkg/backup/item_action.go	Normal file
@@ -0,0 +1,74 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
)

// ItemAction is an actor that performs an operation on an individual item being backed up.
type ItemAction interface {
	// AppliesTo returns information about which resources this action should be invoked for.
	// An ItemAction's Execute function will only be invoked on items that match the returned
	// selector. A zero-valued ResourceSelector matches all resources.
	AppliesTo() (ResourceSelector, error)

	// Execute allows the ItemAction to perform arbitrary logic with the item being backed up,
	// including mutating the item itself prior to backup. The item (unmodified or modified)
	// should be returned, along with an optional slice of ResourceIdentifiers specifying
	// additional related items that should be backed up.
	Execute(item runtime.Unstructured, backup *api.Backup) (runtime.Unstructured, []ResourceIdentifier, error)
}

// ResourceIdentifier describes a single item by its group, resource, namespace, and name.
type ResourceIdentifier struct {
	schema.GroupResource
	Namespace string
	Name      string
}

// ResourceSelector is a collection of included/excluded namespaces,
// included/excluded resources, and a label-selector that can be used
// to match a set of items from a cluster.
type ResourceSelector struct {
	// IncludedNamespaces is a slice of namespace names to match. All
	// namespaces in this slice, except those in ExcludedNamespaces,
	// will be matched. A nil/empty slice matches all namespaces.
	IncludedNamespaces []string
	// ExcludedNamespaces is a slice of namespace names to exclude.
	// All namespaces in IncludedNamespaces, *except* those in
	// this slice, will be matched.
	ExcludedNamespaces []string
	// IncludedResources is a slice of resources to match. Resources
	// may be specified as full names (e.g. "services") or abbreviations
	// (e.g. "svc"). All resources in this slice, except those in
	// ExcludedResources, will be matched. A nil/empty slice matches
	// all resources.
	IncludedResources []string
	// ExcludedResources is a slice of resources to exclude.
	// Resources may be specified as full names (e.g. "services") or
	// abbreviations (e.g. "svc"). All resources in IncludedResources,
	// *except* those in this slice, will be matched.
	ExcludedResources []string
	// LabelSelector is a string representation of a selector to apply
	// when matching resources. See "k8s.io/apimachinery/pkg/labels".Parse()
	// for details on syntax.
	LabelSelector string
}
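With item_action.go in place, third-party code can implement ItemAction directly. A hedged example implementation (the annotation key and type are illustrative; only the interface and the meta.Accessor helper come from this commit and apimachinery):

package arkexample

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"

	api "github.com/heptio/ark/pkg/apis/ark/v1"
	"github.com/heptio/ark/pkg/backup"
)

// annotateConfigMaps stamps each ConfigMap in the "prod" namespace with the
// name of the backup that captured it, and requests no additional items.
type annotateConfigMaps struct{}

func (a *annotateConfigMaps) AppliesTo() (backup.ResourceSelector, error) {
	return backup.ResourceSelector{
		IncludedResources:  []string{"configmaps"},
		IncludedNamespaces: []string{"prod"},
	}, nil
}

func (a *annotateConfigMaps) Execute(item runtime.Unstructured, b *api.Backup) (runtime.Unstructured, []backup.ResourceIdentifier, error) {
	metadata, err := meta.Accessor(item)
	if err != nil {
		return nil, nil, err
	}
	annotations := metadata.GetAnnotations()
	if annotations == nil {
		annotations = map[string]string{}
	}
	annotations["example.ark.heptio.com/backup-name"] = b.Name // illustrative key
	metadata.SetAnnotations(annotations)
	return item, nil, nil // item possibly mutated; no extra related items
}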
@@ -22,16 +22,21 @@ import (
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
api "github.com/heptio/ark/pkg/apis/ark/v1"
|
||||
"github.com/heptio/ark/pkg/client"
|
||||
"github.com/heptio/ark/pkg/discovery"
|
||||
"github.com/heptio/ark/pkg/util/collections"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
api "github.com/heptio/ark/pkg/apis/ark/v1"
|
||||
"github.com/heptio/ark/pkg/client"
|
||||
"github.com/heptio/ark/pkg/cloudprovider"
|
||||
"github.com/heptio/ark/pkg/discovery"
|
||||
"github.com/heptio/ark/pkg/util/collections"
|
||||
"github.com/heptio/ark/pkg/util/logging"
|
||||
)
|
||||
|
||||
type itemBackupperFactory interface {
@@ -39,12 +44,13 @@ type itemBackupperFactory interface {
backup *api.Backup,
namespaces, resources *collections.IncludesExcludes,
backedUpItems map[itemKey]struct{},
actions map[schema.GroupResource]Action,
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
snapshotService cloudprovider.SnapshotService,
) ItemBackupper
}

@@ -54,12 +60,13 @@ func (f *defaultItemBackupperFactory) newItemBackupper(
backup *api.Backup,
namespaces, resources *collections.IncludesExcludes,
backedUpItems map[itemKey]struct{},
actions map[schema.GroupResource]Action,
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
snapshotService cloudprovider.SnapshotService,
) ItemBackupper {
ib := &defaultItemBackupper{
backup: backup,
@@ -71,7 +78,7 @@ func (f *defaultItemBackupperFactory) newItemBackupper(
resourceHooks: resourceHooks,
dynamicFactory: dynamicFactory,
discoveryHelper: discoveryHelper,

snapshotService: snapshotService,
itemHookHandler: &defaultItemHookHandler{
podCommandExecutor: podCommandExecutor,
},
@@ -84,7 +91,7 @@ func (f *defaultItemBackupperFactory) newItemBackupper(
}

type ItemBackupper interface {
backupItem(logger *logrus.Entry, obj runtime.Unstructured, groupResource schema.GroupResource) error
backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) error
}
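
The signature change from *logrus.Entry to logrus.FieldLogger is deliberate: FieldLogger is an interface satisfied by both *logrus.Logger and *logrus.Entry, so callers can pass either. A minimal sketch, assuming only the logrus package:

var log logrus.FieldLogger = logrus.New() // *Logger satisfies FieldLogger
log = log.WithField("backup", "my-backup") // WithField returns *Entry, which satisfies it too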

type defaultItemBackupper struct {
@@ -92,21 +99,23 @@ type defaultItemBackupper struct {
namespaces *collections.IncludesExcludes
resources *collections.IncludesExcludes
backedUpItems map[itemKey]struct{}
actions map[schema.GroupResource]Action
actions []resolvedAction
tarWriter tarWriter
resourceHooks []resourceHook
dynamicFactory client.DynamicFactory
discoveryHelper discovery.Helper
snapshotService cloudprovider.SnapshotService

itemHookHandler itemHookHandler
additionalItemBackupper ItemBackupper
}

var podsGroupResource = schema.GroupResource{Group: "", Resource: "pods"}
var namespacesGroupResource = schema.GroupResource{Group: "", Resource: "namespaces"}

// backupItem backs up an individual item to tarWriter. The item may be excluded based on the
// namespaces IncludesExcludes list.
func (ib *defaultItemBackupper) backupItem(logger *logrus.Entry, obj runtime.Unstructured, groupResource schema.GroupResource) error {
func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) error {
metadata, err := meta.Accessor(obj)
if err != nil {
return err
@@ -127,7 +136,9 @@ func (ib *defaultItemBackupper) backupItem(logger *logrus.Entry, obj runtime.Uns
return nil
}

if namespace == "" && ib.backup.Spec.IncludeClusterResources != nil && !*ib.backup.Spec.IncludeClusterResources {
// NOTE: we specifically allow namespaces to be backed up even if IncludeClusterResources is
// false.
if namespace == "" && groupResource != namespacesGroupResource && ib.backup.Spec.IncludeClusterResources != nil && !*ib.backup.Spec.IncludeClusterResources {
log.Info("Excluding item because resource is cluster-scoped and backup.spec.includeClusterResources is false")
return nil
}
@@ -151,18 +162,38 @@ func (ib *defaultItemBackupper) backupItem(logger *logrus.Entry, obj runtime.Uns

log.Info("Backing up resource")

item := obj.UnstructuredContent()
// Never save status
delete(item, "status")
delete(obj.UnstructuredContent(), "status")

if err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.resourceHooks); err != nil {
return err
}

if action, found := ib.actions[groupResource]; found {
for _, action := range ib.actions {
if !action.resourceIncludesExcludes.ShouldInclude(groupResource.String()) {
log.Debug("Skipping action because it does not apply to this resource")
continue
}

if namespace != "" && !action.namespaceIncludesExcludes.ShouldInclude(namespace) {
log.Debug("Skipping action because it does not apply to this namespace")
continue
}

if !action.selector.Matches(labels.Set(metadata.GetLabels())) {
log.Debug("Skipping action because label selector does not match")
continue
}

log.Info("Executing custom action")

if additionalItemIdentifiers, err := action.Execute(log, obj, ib.backup); err == nil {
if logSetter, ok := action.ItemAction.(logging.LogSetter); ok {
logSetter.SetLog(log)
}

if updatedItem, additionalItemIdentifiers, err := action.Execute(obj, ib.backup); err == nil {
obj = updatedItem

for _, additionalItem := range additionalItemIdentifiers {
gvr, resource, err := ib.discoveryHelper.ResourceFor(additionalItem.GroupResource.WithVersion(""))
if err != nil {
@@ -186,6 +217,16 @@ func (ib *defaultItemBackupper) backupItem(logger *logrus.Entry, obj runtime.Uns
}
}

if groupResource == pvGroupResource {
if ib.snapshotService == nil {
log.Debug("Skipping Persistent Volume snapshot because they're not enabled.")
|
||||
} else {
if err := ib.takePVSnapshot(obj, ib.backup, log); err != nil {
return err
}
}
}

var filePath string
if namespace != "" {
filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.NamespaceScopedDir, namespace, name+".json")
@@ -193,7 +234,7 @@ func (ib *defaultItemBackupper) backupItem(logger *logrus.Entry, obj runtime.Uns
filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.ClusterScopedDir, name+".json")
}

itemBytes, err := json.Marshal(item)
itemBytes, err := json.Marshal(obj.UnstructuredContent())
if err != nil {
return errors.WithStack(err)
}
@@ -216,3 +257,72 @@ func (ib *defaultItemBackupper) backupItem(logger *logrus.Entry, obj runtime.Uns

return nil
}

// zoneLabel is the label that stores availability-zone info
// on PVs
const zoneLabel = "failure-domain.beta.kubernetes.io/zone"

// takePVSnapshot triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided
// backup has volume snapshots enabled and the PV is of a compatible type. Also records cloud
// disk type and IOPS (if applicable) to be able to restore to current state later.
func (ib *defaultItemBackupper) takePVSnapshot(pv runtime.Unstructured, backup *api.Backup, log logrus.FieldLogger) error {
log.Info("Executing takePVSnapshot")

if backup.Spec.SnapshotVolumes != nil && !*backup.Spec.SnapshotVolumes {
log.Info("Backup has volume snapshots disabled; skipping volume snapshot action.")
return nil
}

metadata, err := meta.Accessor(pv)
if err != nil {
return errors.WithStack(err)
}

name := metadata.GetName()
var pvFailureDomainZone string
labels := metadata.GetLabels()

if labels[zoneLabel] != "" {
pvFailureDomainZone = labels[zoneLabel]
} else {
log.Infof("label %q is not present on PersistentVolume", zoneLabel)
}

volumeID, err := ib.snapshotService.GetVolumeID(pv)
if err != nil {
return errors.Wrapf(err, "error getting volume ID for PersistentVolume")
}
if volumeID == "" {
log.Info("PersistentVolume is not a supported volume type for snapshots, skipping.")
return nil
}

log = log.WithField("volumeID", volumeID)

log.Info("Snapshotting PersistentVolume")
snapshotID, err := ib.snapshotService.CreateSnapshot(volumeID, pvFailureDomainZone)
if err != nil {
// log + error on purpose: the log message goes to the per-backup log file, and the returned error is aggregated into the backup's errors
log.WithError(err).Error("error creating snapshot")
|
||||
return errors.WithMessage(err, "error creating snapshot")
|
||||
}
|
||||
|
||||
volumeType, iops, err := ib.snapshotService.GetVolumeInfo(volumeID, pvFailureDomainZone)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("error getting volume info")
|
||||
return errors.WithMessage(err, "error getting volume info")
|
||||
}
|
||||
|
||||
if backup.Status.VolumeBackups == nil {
|
||||
backup.Status.VolumeBackups = make(map[string]*api.VolumeBackupInfo)
|
||||
}
|
||||
|
||||
backup.Status.VolumeBackups[name] = &api.VolumeBackupInfo{
|
||||
SnapshotID: snapshotID,
|
||||
Type: volumeType,
|
||||
Iops: iops,
|
||||
AvailabilityZone: pvFailureDomainZone,
|
||||
}
|
||||
|
||||
return nil
|
||||
}
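
After takePVSnapshot returns, the snapshot metadata is recorded in the backup's status, keyed by PV name. A hedged sketch of reading it back, assuming a PV named "mypv" was snapshotted:

if info := backup.Status.VolumeBackups["mypv"]; info != nil {
	log.Infof("snapshot %s (type %s, zone %s)", info.SnapshotID, info.Type, info.AvailabilityZone)
}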

@@ -22,8 +22,10 @@ import (
"fmt"
"reflect"
"testing"
"time"

"github.com/heptio/ark/pkg/apis/ark/v1"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/util/collections"
arktest "github.com/heptio/ark/pkg/util/test"
"github.com/pkg/errors"
@@ -33,6 +35,7 @@ import (
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
@@ -83,7 +86,7 @@ func TestBackupItemSkips(t *testing.T) {
namespaces: collections.NewIncludesExcludes(),
resources: collections.NewIncludesExcludes(),
backedUpItems: map[itemKey]struct{}{
{resource: "bar.foo", namespace: "ns", name: "foo"}: struct{}{},
{resource: "bar.foo", namespace: "ns", name: "foo"}: {},
},
},
}
@@ -103,6 +106,23 @@ func TestBackupItemSkips(t *testing.T) {
}
}

func TestBackupItemSkipsClusterScopedResourceWhenIncludeClusterResourcesFalse(t *testing.T) {
f := false
ib := &defaultItemBackupper{
backup: &v1.Backup{
Spec: v1.BackupSpec{
IncludeClusterResources: &f,
},
},
namespaces: collections.NewIncludesExcludes(),
resources: collections.NewIncludesExcludes(),
}

u := unstructuredOrDie(`{"apiVersion":"v1","kind":"Foo","metadata":{"name":"bar"}}`)
err := ib.backupItem(arktest.NewLogger(), u, schema.GroupResource{Group: "foo", Resource: "bar"})
assert.NoError(t, err)
}

func TestBackupItemNoSkips(t *testing.T) {
tests := []struct {
name string
@@ -117,6 +137,8 @@ func TestBackupItemNoSkips(t *testing.T) {
expectedActionID string
customActionAdditionalItemIdentifiers []ResourceIdentifier
customActionAdditionalItems []runtime.Unstructured
groupResource string
snapshottableVolumes map[string]api.VolumeBackupInfo
}{
{
name: "explicit namespace include",
@@ -206,12 +228,33 @@ func TestBackupItemNoSkips(t *testing.T) {
unstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`),
},
},
{
name: "takePVSnapshot is not invoked for PVs when snapshotService == nil",
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
item: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`,
expectError: false,
expectExcluded: false,
expectedTarHeaderName: "resources/persistentvolumes/cluster/mypv.json",
groupResource: "persistentvolumes",
},
{
name: "takePVSnapshot is invoked for PVs when snapshotService != nil",
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
item: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`,
expectError: false,
expectExcluded: false,
expectedTarHeaderName: "resources/persistentvolumes/cluster/mypv.json",
groupResource: "persistentvolumes",
snapshottableVolumes: map[string]api.VolumeBackupInfo{
"vol-abc123": {SnapshotID: "snapshot-1", AvailabilityZone: "us-east-1c"},
},
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var (
actions map[schema.GroupResource]Action
actions []resolvedAction
action *fakeAction
backup = &v1.Backup{}
groupResource = schema.ParseGroupResource("resource.group")
@@ -220,6 +263,10 @@ func TestBackupItemNoSkips(t *testing.T) {
w = &fakeTarWriter{}
)

if test.groupResource != "" {
groupResource = schema.ParseGroupResource(test.groupResource)
}

item, err := getAsMap(test.item)
if err != nil {
t.Fatal(err)
@@ -241,8 +288,13 @@ func TestBackupItemNoSkips(t *testing.T) {
action = &fakeAction{
additionalItems: test.customActionAdditionalItemIdentifiers,
}
actions = map[schema.GroupResource]Action{
groupResource: action,
actions = []resolvedAction{
{
ItemAction: action,
namespaceIncludesExcludes: collections.NewIncludesExcludes(),
resourceIncludesExcludes: collections.NewIncludesExcludes().Includes(groupResource.String()),
selector: labels.Everything(),
},
}
}

@@ -267,8 +319,18 @@ func TestBackupItemNoSkips(t *testing.T) {
resourceHooks,
dynamicFactory,
discoveryHelper,
nil,
).(*defaultItemBackupper)

var snapshotService *arktest.FakeSnapshotService
if test.snapshottableVolumes != nil {
snapshotService = &arktest.FakeSnapshotService{
SnapshottableVolumes: test.snapshottableVolumes,
VolumeID: "vol-abc123",
}
b.snapshotService = snapshotService
}

// make sure the podCommandExecutor was set correctly in the real hook handler
assert.Equal(t, podCommandExecutor, b.itemHookHandler.(*defaultItemHookHandler).podCommandExecutor)

@@ -297,7 +359,7 @@ func TestBackupItemNoSkips(t *testing.T) {
err = b.backupItem(arktest.NewLogger(), obj, groupResource)
gotError := err != nil
if e, a := test.expectError, gotError; e != a {
t.Fatalf("error: expected %t, got %t", e, a)
t.Fatalf("error: expected %t, got %t: %v", e, a, err)
}
if test.expectError {
return
@@ -344,10 +406,181 @@ func TestBackupItemNoSkips(t *testing.T) {
t.Errorf("action.ids[0]: expected %s, got %s", e, a)
}

if len(action.backups) != 1 {
t.Errorf("unexpected custom action backups: %#v", action.backups)
} else if e, a := backup, action.backups[0]; e != a {
t.Errorf("action.backups[0]: expected %#v, got %#v", e, a)
require.Equal(t, 1, len(action.backups), "unexpected custom action backups: %#v", action.backups)
assert.Equal(t, backup, &(action.backups[0]), "backup")
}

if test.snapshottableVolumes != nil {
require.Equal(t, 1, len(snapshotService.SnapshotsTaken))

var expectedBackups []api.VolumeBackupInfo
for _, vbi := range test.snapshottableVolumes {
expectedBackups = append(expectedBackups, vbi)
}

var actualBackups []api.VolumeBackupInfo
for _, vbi := range backup.Status.VolumeBackups {
actualBackups = append(actualBackups, *vbi)
}

assert.Equal(t, expectedBackups, actualBackups)
}
})
}
}

func TestTakePVSnapshot(t *testing.T) {
iops := int64(1000)

tests := []struct {
name string
snapshotEnabled bool
pv string
ttl time.Duration
expectError bool
expectedVolumeID string
expectedSnapshotsTaken int
existingVolumeBackups map[string]*v1.VolumeBackupInfo
volumeInfo map[string]v1.VolumeBackupInfo
}{
{
name: "snapshot disabled",
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}}`,
snapshotEnabled: false,
},
{
name: "unsupported PV source type",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"unsupportedPVSource": {}}}`,
expectError: false,
},
{
name: "without iops",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`,
expectError: false,
expectedSnapshotsTaken: 1,
expectedVolumeID: "vol-abc123",
ttl: 5 * time.Minute,
volumeInfo: map[string]v1.VolumeBackupInfo{
"vol-abc123": {Type: "gp", SnapshotID: "snap-1", AvailabilityZone: "us-east-1c"},
},
},
{
name: "with iops",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`,
expectError: false,
expectedSnapshotsTaken: 1,
expectedVolumeID: "vol-abc123",
ttl: 5 * time.Minute,
volumeInfo: map[string]v1.VolumeBackupInfo{
"vol-abc123": {Type: "io1", Iops: &iops, SnapshotID: "snap-1", AvailabilityZone: "us-east-1c"},
},
},
{
name: "preexisting volume backup info in backup status",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"gcePersistentDisk": {"pdName": "pd-abc123"}}}`,
expectError: false,
expectedSnapshotsTaken: 1,
expectedVolumeID: "pd-abc123",
ttl: 5 * time.Minute,
existingVolumeBackups: map[string]*v1.VolumeBackupInfo{
"anotherpv": {SnapshotID: "anothersnap"},
},
volumeInfo: map[string]v1.VolumeBackupInfo{
"pd-abc123": {Type: "gp", SnapshotID: "snap-1"},
},
},
{
name: "create snapshot error",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"gcePersistentDisk": {"pdName": "pd-abc123"}}}`,
expectedVolumeID: "pd-abc123",
expectError: true,
},
{
name: "PV with label metadata but no failureDomainZone",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/region": "us-east-1"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`,
expectError: false,
expectedSnapshotsTaken: 1,
expectedVolumeID: "vol-abc123",
ttl: 5 * time.Minute,
volumeInfo: map[string]v1.VolumeBackupInfo{
"vol-abc123": {Type: "gp", SnapshotID: "snap-1"},
},
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
backup := &v1.Backup{
ObjectMeta: metav1.ObjectMeta{
Namespace: v1.DefaultNamespace,
Name: "mybackup",
},
Spec: v1.BackupSpec{
SnapshotVolumes: &test.snapshotEnabled,
TTL: metav1.Duration{Duration: test.ttl},
},
Status: v1.BackupStatus{
VolumeBackups: test.existingVolumeBackups,
},
}

snapshotService := &arktest.FakeSnapshotService{
SnapshottableVolumes: test.volumeInfo,
VolumeID: test.expectedVolumeID,
}

ib := &defaultItemBackupper{snapshotService: snapshotService}

pv, err := getAsMap(test.pv)
if err != nil {
t.Fatal(err)
}

// method under test
err = ib.takePVSnapshot(&unstructured.Unstructured{Object: pv}, backup, arktest.NewLogger())

gotErr := err != nil

if e, a := test.expectError, gotErr; e != a {
t.Errorf("error: expected %v, got %v", e, a)
}
if test.expectError {
return
}

if !test.snapshotEnabled {
// don't need to check anything else if snapshots are disabled
return
}

expectedVolumeBackups := test.existingVolumeBackups
if expectedVolumeBackups == nil {
expectedVolumeBackups = make(map[string]*v1.VolumeBackupInfo)
}

// we should have one snapshot taken exactly
require.Equal(t, test.expectedSnapshotsTaken, snapshotService.SnapshotsTaken.Len())

if test.expectedSnapshotsTaken > 0 {
// the snapshotID should be the one in the entry in snapshotService.SnapshottableVolumes
// for the volume we ran the test for
snapshotID, _ := snapshotService.SnapshotsTaken.PopAny()

expectedVolumeBackups["mypv"] = &v1.VolumeBackupInfo{
SnapshotID: snapshotID,
Type: test.volumeInfo[test.expectedVolumeID].Type,
Iops: test.volumeInfo[test.expectedVolumeID].Iops,
AvailabilityZone: test.volumeInfo[test.expectedVolumeID].AvailabilityZone,
}

if e, a := expectedVolumeBackups, backup.Status.VolumeBackups; !reflect.DeepEqual(e, a) {
t.Errorf("backup.status.VolumeBackups: expected %v, got %v", e, a)
}
}
})
@@ -378,7 +611,7 @@ type mockItemBackupper struct {
mock.Mock
}

func (ib *mockItemBackupper) backupItem(logger *logrus.Entry, obj runtime.Unstructured, groupResource schema.GroupResource) error {
func (ib *mockItemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) error {
args := ib.Called(logger, obj, groupResource)
return args.Error(0)
}

@@ -25,9 +25,8 @@ import (
"github.com/heptio/ark/pkg/util/collections"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand"
kapiv1 "k8s.io/api/core/v1"
kscheme "k8s.io/client-go/kubernetes/scheme"
kapiv1 "k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
)
@@ -129,7 +128,7 @@ func (e *defaultPodCommandExecutor) executePodCommand(log *logrus.Entry, item ma
Stderr: true,
}, kscheme.ParameterCodec)

executor, err := e.streamExecutorFactory.NewExecutor(e.restClientConfig, "POST", req.URL())
executor, err := e.streamExecutorFactory.NewSPDYExecutor(e.restClientConfig, "POST", req.URL())
if err != nil {
return err
}
@@ -137,9 +136,8 @@ func (e *defaultPodCommandExecutor) executePodCommand(log *logrus.Entry, item ma
var stdout, stderr bytes.Buffer

streamOptions := remotecommand.StreamOptions{
SupportedProtocols: remotecommandconsts.SupportedStreamingProtocols,
Stdout: &stdout,
Stderr: &stderr,
Stdout: &stdout,
Stderr: &stderr,
}

errCh := make(chan error)
@@ -215,11 +213,11 @@ func setDefaultHookContainer(pod map[string]interface{}, hook *api.ExecHook) err
}

type streamExecutorFactory interface {
NewExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.StreamExecutor, error)
NewSPDYExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.Executor, error)
}

type defaultStreamExecutorFactory struct{}

func (f *defaultStreamExecutorFactory) NewExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.StreamExecutor, error) {
return remotecommand.NewExecutor(config, method, url)
func (f *defaultStreamExecutorFactory) NewSPDYExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.Executor, error) {
return remotecommand.NewSPDYExecutor(config, method, url)
}
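
The NewExecutor-to-NewSPDYExecutor rename tracks the updated client-go remotecommand API, in which the SPDY-specific constructor replaced the generic one and StreamOptions no longer carries SupportedProtocols. A minimal sketch of the resulting call sequence, with cfg, req, stdout, and stderr assumed in scope:

executor, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL())
if err != nil {
	return err
}
return executor.Stream(remotecommand.StreamOptions{Stdout: &stdout, Stderr: &stderr})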

@@ -33,7 +33,6 @@ import (
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
)
@@ -201,13 +200,12 @@ func TestExecutePodCommand(t *testing.T) {
expectedURL, _ := url.Parse(
fmt.Sprintf("https://some.server/api/v1/namespaces/namespace/pods/name/exec?command=%s&container=%s&stderr=true&stdout=true", expectedCommand, test.expectedContainerName),
)
streamExecutorFactory.On("NewExecutor", clientConfig, "POST", expectedURL).Return(streamExecutor, nil)
streamExecutorFactory.On("NewSPDYExecutor", clientConfig, "POST", expectedURL).Return(streamExecutor, nil)

var stdout, stderr bytes.Buffer
expectedStreamOptions := remotecommand.StreamOptions{
SupportedProtocols: remotecommandconsts.SupportedStreamingProtocols,
Stdout: &stdout,
Stderr: &stderr,
Stdout: &stdout,
Stderr: &stderr,
}
streamExecutor.On("Stream", expectedStreamOptions).Return(test.hookError)

@@ -244,14 +242,14 @@ type mockStreamExecutorFactory struct {
mock.Mock
}

func (f *mockStreamExecutorFactory) NewExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.StreamExecutor, error) {
func (f *mockStreamExecutorFactory) NewSPDYExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.Executor, error) {
args := f.Called(config, method, url)
return args.Get(0).(remotecommand.StreamExecutor), args.Error(1)
return args.Get(0).(remotecommand.Executor), args.Error(1)
}

type mockStreamExecutor struct {
mock.Mock
remotecommand.StreamExecutor
remotecommand.Executor
}

func (e *mockStreamExecutor) Stream(options remotecommand.StreamOptions) error {

@@ -19,12 +19,14 @@ package backup
import (
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/util/collections"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kuberrs "k8s.io/apimachinery/pkg/util/errors"
@@ -32,7 +34,7 @@ import (

type resourceBackupperFactory interface {
newResourceBackupper(
log *logrus.Entry,
log logrus.FieldLogger,
backup *api.Backup,
namespaces *collections.IncludesExcludes,
resources *collections.IncludesExcludes,
@@ -41,17 +43,18 @@ type resourceBackupperFactory interface {
discoveryHelper discovery.Helper,
backedUpItems map[itemKey]struct{},
cohabitatingResources map[string]*cohabitatingResource,
actions map[schema.GroupResource]Action,
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
snapshotService cloudprovider.SnapshotService,
) resourceBackupper
}

type defaultResourceBackupperFactory struct{}

func (f *defaultResourceBackupperFactory) newResourceBackupper(
log *logrus.Entry,
log logrus.FieldLogger,
backup *api.Backup,
namespaces *collections.IncludesExcludes,
resources *collections.IncludesExcludes,
@@ -60,10 +63,11 @@ func (f *defaultResourceBackupperFactory) newResourceBackupper(
discoveryHelper discovery.Helper,
backedUpItems map[itemKey]struct{},
cohabitatingResources map[string]*cohabitatingResource,
actions map[schema.GroupResource]Action,
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
snapshotService cloudprovider.SnapshotService,
) resourceBackupper {
return &defaultResourceBackupper{
log: log,
@@ -79,8 +83,8 @@ func (f *defaultResourceBackupperFactory) newResourceBackupper(
podCommandExecutor: podCommandExecutor,
tarWriter: tarWriter,
resourceHooks: resourceHooks,

itemBackupperFactory: &defaultItemBackupperFactory{},
snapshotService: snapshotService,
itemBackupperFactory: &defaultItemBackupperFactory{},
}
}

@@ -89,7 +93,7 @@ type resourceBackupper interface {
}

type defaultResourceBackupper struct {
log *logrus.Entry
log logrus.FieldLogger
backup *api.Backup
namespaces *collections.IncludesExcludes
resources *collections.IncludesExcludes
@@ -98,12 +102,12 @@ type defaultResourceBackupper struct {
discoveryHelper discovery.Helper
backedUpItems map[itemKey]struct{}
cohabitatingResources map[string]*cohabitatingResource
actions map[schema.GroupResource]Action
actions []resolvedAction
podCommandExecutor podCommandExecutor
tarWriter tarWriter
resourceHooks []resourceHook

itemBackupperFactory itemBackupperFactory
snapshotService cloudprovider.SnapshotService
itemBackupperFactory itemBackupperFactory
}

// backupResource backs up all the objects for a given group-version-resource.
@@ -122,24 +126,29 @@ func (rb *defaultResourceBackupper) backupResource(

log := rb.log.WithField("groupResource", grString)

switch {
case rb.backup.Spec.IncludeClusterResources == nil:
// when IncludeClusterResources == nil (auto), only directly
// back up cluster-scoped resources if we're doing a full-cluster
// (all namespaces) backup. Note that in the case of a subset of
// namespaces being backed up, some related cluster-scoped resources
// may still be backed up if triggered by a custom action (e.g. PVC->PV).
if !resource.Namespaced && !rb.namespaces.IncludeEverything() {
log.Info("Skipping resource because it's cluster-scoped and only specific namespaces are included in the backup")
return nil
}
case *rb.backup.Spec.IncludeClusterResources == false:
if !resource.Namespaced {
log.Info("Evaluating resource")

clusterScoped := !resource.Namespaced

// If the resource we are backing up is NOT namespaces, and it is cluster-scoped, check to see if
// we should include it based on the IncludeClusterResources setting.
if gr != namespacesGroupResource && clusterScoped {
if rb.backup.Spec.IncludeClusterResources == nil {
if !rb.namespaces.IncludeEverything() {
// when IncludeClusterResources == nil (auto), only directly
// back up cluster-scoped resources if we're doing a full-cluster
// (all namespaces) backup. Note that in the case of a subset of
// namespaces being backed up, some related cluster-scoped resources
// may still be backed up if triggered by a custom action (e.g. PVC->PV).
// If we're processing namespaces themselves, we will not skip here, they may be
// filtered out later.
log.Info("Skipping resource because it's cluster-scoped and only specific namespaces are included in the backup")
return nil
}
} else if !*rb.backup.Spec.IncludeClusterResources {
log.Info("Skipping resource because it's cluster-scoped")
return nil
}
case *rb.backup.Spec.IncludeClusterResources == true:
// include the resource, no action required
}
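
The tri-state IncludeClusterResources setting reduces to a small decision rule for cluster-scoped resources other than namespaces; a sketch of the logic above, with an illustrative helper name:

// nil means "auto": back up cluster-scoped resources directly only for full-cluster backups.
func includeClusterScoped(includeClusterResources *bool, allNamespaces bool) bool {
	if includeClusterResources == nil {
		return allNamespaces
	}
	return *includeClusterResources
}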

if !rb.resources.ShouldInclude(grString) {
@@ -171,20 +180,61 @@ func (rb *defaultResourceBackupper) backupResource(
rb.resourceHooks,
rb.dynamicFactory,
rb.discoveryHelper,
rb.snapshotService,
)

var namespacesToList []string
if resource.Namespaced {
namespacesToList = getNamespacesToList(rb.namespaces)
} else {
namespacesToList := getNamespacesToList(rb.namespaces)

// Check if we're backing up namespaces, and only certain ones
if gr == namespacesGroupResource && namespacesToList[0] != "" {
resourceClient, err := rb.dynamicFactory.ClientForGroupVersionResource(gv, resource, "")
if err != nil {
return err
}

var labelSelector labels.Selector
if rb.backup.Spec.LabelSelector != nil {
labelSelector, err = metav1.LabelSelectorAsSelector(rb.backup.Spec.LabelSelector)
if err != nil {
// This should never happen...
return errors.Wrap(err, "invalid label selector")
}
}

for _, ns := range namespacesToList {
log.WithField("namespace", ns).Info("Getting namespace")
unstructured, err := resourceClient.Get(ns, metav1.GetOptions{})
if err != nil {
errs = append(errs, errors.Wrap(err, "error getting namespace"))
continue
}

labels := labels.Set(unstructured.GetLabels())
if labelSelector != nil && !labelSelector.Matches(labels) {
log.WithField("name", unstructured.GetName()).Info("skipping item because it does not match the backup's label selector")
continue
}

if err := itemBackupper.backupItem(log, unstructured, gr); err != nil {
errs = append(errs, err)
}
}

return kuberrs.NewAggregate(errs)
}

// If we get here, we're backing up something other than namespaces
if clusterScoped {
namespacesToList = []string{""}
}

for _, namespace := range namespacesToList {
resourceClient, err := rb.dynamicFactory.ClientForGroupVersionResource(gv, resource, namespace)
if err != nil {
return err
}

log.WithField("namespace", namespace).Info("Listing items")
unstructuredList, err := resourceClient.List(metav1.ListOptions{LabelSelector: rb.labelSelector})
if err != nil {
return errors.WithStack(err)
@@ -196,6 +246,7 @@ func (rb *defaultResourceBackupper) backupResource(
return errors.WithStack(err)
}

log.WithField("namespace", namespace).Infof("Retrieved %d items", len(items))
for _, item := range items {
unstructured, ok := item.(runtime.Unstructured)
if !ok {
@@ -203,6 +254,17 @@ func (rb *defaultResourceBackupper) backupResource(
continue
}

metadata, err := meta.Accessor(unstructured)
if err != nil {
errs = append(errs, errors.Wrapf(err, "unable to get a metadata accessor"))
continue
}

if gr == namespacesGroupResource && !rb.namespaces.ShouldInclude(metadata.GetName()) {
log.WithField("name", metadata.GetName()).Info("skipping namespace because it is excluded")
continue
}

if err := itemBackupper.backupItem(log, unstructured, gr); err != nil {
errs = append(errs, err)
}

@@ -21,9 +21,11 @@ import (

"github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/util/collections"
arktest "github.com/heptio/ark/pkg/util/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -50,6 +52,7 @@ func TestBackupResource(t *testing.T) {
groupVersion schema.GroupVersion
groupResource schema.GroupResource
listResponses [][]*unstructured.Unstructured
getResponses []*unstructured.Unstructured
includeClusterResources *bool
}{
{
@@ -195,6 +198,22 @@ func TestBackupResource(t *testing.T) {
},
},
},
{
name: "should include specified namespaces if backing up subset of namespaces and --include-cluster-resources=nil",
namespaces: collections.NewIncludesExcludes().Includes("ns-1", "ns-2"),
resources: collections.NewIncludesExcludes(),
includeClusterResources: nil,
expectedListedNamespaces: []string{"ns-1", "ns-2"},
apiGroup: v1Group,
apiResource: namespacesResource,
groupVersion: schema.GroupVersion{Group: "", Version: "v1"},
groupResource: schema.GroupResource{Group: "", Resource: "namespaces"},
expectSkip: false,
getResponses: []*unstructured.Unstructured{
unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`),
unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`),
},
},
}

for _, test := range tests {
@@ -212,7 +231,7 @@ func TestBackupResource(t *testing.T) {
discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil)

backedUpItems := map[itemKey]struct{}{
{resource: "foo", namespace: "ns", name: "name"}: struct{}{},
{resource: "foo", namespace: "ns", name: "name"}: {},
}

cohabitatingResources := map[string]*cohabitatingResource{
@@ -220,8 +239,11 @@ func TestBackupResource(t *testing.T) {
"networkpolicies": newCohabitatingResource("networkpolicies", "extensions", "networking.k8s.io"),
}

actions := map[schema.GroupResource]Action{
{Group: "", Resource: "pods"}: &fakeAction{},
actions := []resolvedAction{
{
ItemAction: newFakeAction("pods"),
resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("pods"),
},
}

resourceHooks := []resourceHook{
@@ -248,6 +270,7 @@ func TestBackupResource(t *testing.T) {
podCommandExecutor,
tarWriter,
resourceHooks,
nil,
).(*defaultResourceBackupper)

itemBackupperFactory := &mockItemBackupperFactory{}
@@ -269,25 +292,41 @@ func TestBackupResource(t *testing.T) {
resourceHooks,
dynamicFactory,
discoveryHelper,
mock.Anything,
).Return(itemBackupper)

for i, namespace := range test.expectedListedNamespaces {
if len(test.listResponses) > 0 {
for i, namespace := range test.expectedListedNamespaces {
client := &arktest.FakeDynamicClient{}
defer client.AssertExpectations(t)

dynamicFactory.On("ClientForGroupVersionResource", test.groupVersion, test.apiResource, namespace).Return(client, nil)

list := &unstructured.UnstructuredList{
Items: []unstructured.Unstructured{},
}
for _, item := range test.listResponses[i] {
list.Items = append(list.Items, *item)
itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), item, test.groupResource).Return(nil)
}
client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(list, nil)
}
}

if len(test.getResponses) > 0 {
client := &arktest.FakeDynamicClient{}
defer client.AssertExpectations(t)

dynamicFactory.On("ClientForGroupVersionResource", test.groupVersion, test.apiResource, namespace).Return(client, nil)
dynamicFactory.On("ClientForGroupVersionResource", test.groupVersion, test.apiResource, "").Return(client, nil)

list := &unstructured.UnstructuredList{
Items: []unstructured.Unstructured{},
}
for _, item := range test.listResponses[i] {
list.Items = append(list.Items, *item)
for i, namespace := range test.expectedListedNamespaces {
item := test.getResponses[i]
client.On("Get", namespace, metav1.GetOptions{}).Return(item, nil)
itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), item, test.groupResource).Return(nil)
}
client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(list, nil)

}
}

err := rb.backupResource(test.apiGroup, test.apiResource)
require.NoError(t, err)
})
@@ -352,7 +391,7 @@ func TestBackupResourceCohabitation(t *testing.T) {
discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil)

backedUpItems := map[itemKey]struct{}{
{resource: "foo", namespace: "ns", name: "name"}: struct{}{},
{resource: "foo", namespace: "ns", name: "name"}: {},
}

cohabitatingResources := map[string]*cohabitatingResource{
@@ -360,8 +399,11 @@ func TestBackupResourceCohabitation(t *testing.T) {
"networkpolicies": newCohabitatingResource("networkpolicies", "extensions", "networking.k8s.io"),
}

actions := map[schema.GroupResource]Action{
{Group: "", Resource: "pods"}: &fakeAction{},
actions := []resolvedAction{
{
ItemAction: newFakeAction("pods"),
resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("pods"),
},
}

resourceHooks := []resourceHook{
@@ -387,6 +429,7 @@ func TestBackupResourceCohabitation(t *testing.T) {
podCommandExecutor,
tarWriter,
resourceHooks,
nil,
).(*defaultResourceBackupper)

itemBackupperFactory := &mockItemBackupperFactory{}
@@ -407,6 +450,7 @@ func TestBackupResourceCohabitation(t *testing.T) {
resourceHooks,
dynamicFactory,
discoveryHelper,
mock.Anything,
).Return(itemBackupper)

client := &arktest.FakeDynamicClient{}
@@ -427,6 +471,185 @@ func TestBackupResourceCohabitation(t *testing.T) {
}
}

func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
backup := &v1.Backup{}

namespaces := collections.NewIncludesExcludes().Includes("ns-1")
resources := collections.NewIncludesExcludes().Includes("*")

labelSelector := "foo=bar"
backedUpItems := map[itemKey]struct{}{}

dynamicFactory := &arktest.FakeDynamicFactory{}
defer dynamicFactory.AssertExpectations(t)

discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil)

cohabitatingResources := map[string]*cohabitatingResource{}

actions := []resolvedAction{}

resourceHooks := []resourceHook{}

podCommandExecutor := &mockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)

tarWriter := &fakeTarWriter{}

rb := (&defaultResourceBackupperFactory{}).newResourceBackupper(
arktest.NewLogger(),
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
cohabitatingResources,
actions,
podCommandExecutor,
tarWriter,
resourceHooks,
nil,
).(*defaultResourceBackupper)

itemBackupperFactory := &mockItemBackupperFactory{}
defer itemBackupperFactory.AssertExpectations(t)
rb.itemBackupperFactory = itemBackupperFactory

itemHookHandler := &mockItemHookHandler{}
defer itemHookHandler.AssertExpectations(t)

itemBackupper := &defaultItemBackupper{
backup: backup,
namespaces: namespaces,
resources: resources,
backedUpItems: backedUpItems,
actions: actions,
tarWriter: tarWriter,
resourceHooks: resourceHooks,
dynamicFactory: dynamicFactory,
discoveryHelper: discoveryHelper,
itemHookHandler: itemHookHandler,
snapshotService: nil,
}

itemBackupperFactory.On("newItemBackupper",
backup,
namespaces,
resources,
backedUpItems,
actions,
podCommandExecutor,
tarWriter,
resourceHooks,
dynamicFactory,
discoveryHelper,
mock.Anything,
).Return(itemBackupper)

client := &arktest.FakeDynamicClient{}
defer client.AssertExpectations(t)

coreV1Group := schema.GroupVersion{Group: "", Version: "v1"}
dynamicFactory.On("ClientForGroupVersionResource", coreV1Group, namespacesResource, "").Return(client, nil)
ns1 := unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`)
client.On("Get", "ns-1", metav1.GetOptions{}).Return(ns1, nil)

itemHookHandler.On("handleHooks", mock.Anything, schema.GroupResource{Group: "", Resource: "namespaces"}, ns1, resourceHooks).Return(nil)

err := rb.backupResource(v1Group, namespacesResource)
require.NoError(t, err)

require.Len(t, tarWriter.headers, 1)
assert.Equal(t, "resources/namespaces/cluster/ns-1.json", tarWriter.headers[0].Name)
}

func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) {
backup := &v1.Backup{}

namespaces := collections.NewIncludesExcludes().Excludes("ns-1")
resources := collections.NewIncludesExcludes().Includes("*")

labelSelector := "foo=bar"
backedUpItems := map[itemKey]struct{}{}

dynamicFactory := &arktest.FakeDynamicFactory{}
defer dynamicFactory.AssertExpectations(t)

discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil)

cohabitatingResources := map[string]*cohabitatingResource{}

actions := []resolvedAction{}

resourceHooks := []resourceHook{}

podCommandExecutor := &mockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)

tarWriter := &fakeTarWriter{}

rb := (&defaultResourceBackupperFactory{}).newResourceBackupper(
arktest.NewLogger(),
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
cohabitatingResources,
actions,
podCommandExecutor,
tarWriter,
resourceHooks,
nil,
).(*defaultResourceBackupper)

itemBackupperFactory := &mockItemBackupperFactory{}
defer itemBackupperFactory.AssertExpectations(t)
rb.itemBackupperFactory = itemBackupperFactory

itemHookHandler := &mockItemHookHandler{}
defer itemHookHandler.AssertExpectations(t)

itemBackupper := &mockItemBackupper{}
defer itemBackupper.AssertExpectations(t)

itemBackupperFactory.On("newItemBackupper",
backup,
namespaces,
resources,
backedUpItems,
actions,
podCommandExecutor,
tarWriter,
resourceHooks,
dynamicFactory,
discoveryHelper,
mock.Anything,
).Return(itemBackupper)

client := &arktest.FakeDynamicClient{}
defer client.AssertExpectations(t)

coreV1Group := schema.GroupVersion{Group: "", Version: "v1"}
dynamicFactory.On("ClientForGroupVersionResource", coreV1Group, namespacesResource, "").Return(client, nil)

ns1 := unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`)
ns2 := unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`)
list := &unstructured.UnstructuredList{
Items: []unstructured.Unstructured{*ns1, *ns2},
}
client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(list, nil)

itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), ns2, namespacesGroupResource).Return(nil)

err := rb.backupResource(v1Group, namespacesResource)
require.NoError(t, err)
}

type mockItemBackupperFactory struct {
mock.Mock
}
@@ -435,12 +658,13 @@ func (ibf *mockItemBackupperFactory) newItemBackupper(
backup *v1.Backup,
namespaces, resources *collections.IncludesExcludes,
backedUpItems map[itemKey]struct{},
actions map[schema.GroupResource]Action,
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
snapshotService cloudprovider.SnapshotService,
) ItemBackupper {
args := ibf.Called(
backup,
@@ -453,292 +677,7 @@ func (ibf *mockItemBackupperFactory) newItemBackupper(
resourceHooks,
dynamicFactory,
discoveryHelper,
snapshotService,
)
return args.Get(0).(ItemBackupper)
}

/*
func TestBackupResource2(t *testing.T) {
tests := []struct {
name string
resourceIncludesExcludes *collections.IncludesExcludes
resourceGroup string
resourceVersion string
resourceGV string
resourceName string
resourceNamespaced bool
namespaceIncludesExcludes *collections.IncludesExcludes
expectedListedNamespaces []string
lists []string
labelSelector string
actions map[string]Action
expectedActionIDs map[string][]string
deploymentsBackedUp bool
expectedDeploymentsBackedUp bool
networkPoliciesBackedUp bool
expectedNetworkPoliciesBackedUp bool
}{
{
name: "should not include resource",
resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("pods"),
resourceGV: "v1",
resourceName: "secrets",
resourceNamespaced: true,
},
{
name: "should skip deployments.extensions if we've seen deployments.apps",
resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
resourceGV: "extensions/v1beta1",
resourceName: "deployments",
resourceNamespaced: true,
deploymentsBackedUp: true,
expectedDeploymentsBackedUp: true,
},
{
name: "should skip deployments.apps if we've seen deployments.extensions",
resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
resourceGV: "apps/v1beta1",
resourceName: "deployments",
resourceNamespaced: true,
deploymentsBackedUp: true,
expectedDeploymentsBackedUp: true,
},
{
name: "should skip networkpolicies.extensions if we've seen networkpolicies.networking.k8s.io",
resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
resourceGV: "extensions/v1beta1",
resourceName: "networkpolicies",
resourceNamespaced: true,
networkPoliciesBackedUp: true,
expectedNetworkPoliciesBackedUp: true,
},
{
name: "should skip networkpolicies.networking.k8s.io if we've seen networkpolicies.extensions",
resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
resourceGV: "networking.k8s.io/v1",
resourceName: "networkpolicies",
resourceNamespaced: true,
networkPoliciesBackedUp: true,
expectedNetworkPoliciesBackedUp: true,
},
{
name: "list per namespace when not including *",
resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
resourceGroup: "apps",
resourceVersion: "v1beta1",
resourceGV: "apps/v1beta1",
resourceName: "deployments",
resourceNamespaced: true,
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("a", "b"),
expectedListedNamespaces: []string{"a", "b"},
lists: []string{
`{
"apiVersion": "apps/v1beta1",
"kind": "DeploymentList",
"items": [
{
"metadata": {
"namespace": "a",
"name": "1"
}
}
]
}`,
`{
"apiVersion": "apps/v1beta1v1",
"kind": "DeploymentList",
"items": [
{
"metadata": {
"namespace": "b",
"name": "2"
}
}
]
}`,
},
expectedDeploymentsBackedUp: true,
},
{
name: "list all namespaces when including *",
resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
resourceGroup: "networking.k8s.io",
resourceVersion: "v1",
resourceGV: "networking.k8s.io/v1",
resourceName: "networkpolicies",
resourceNamespaced: true,
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
expectedListedNamespaces: []string{""},
lists: []string{
`{
"apiVersion": "networking.k8s.io/v1",
"kind": "NetworkPolicyList",
"items": [
{
"metadata": {
"namespace": "a",
"name": "1"
}
}
]
}`,
},
expectedNetworkPoliciesBackedUp: true,
},
{
name: "list all namespaces when cluster-scoped, even with namespace includes",
resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
resourceGroup: "certificates.k8s.io",
resourceVersion: "v1beta1",
resourceGV: "certificates.k8s.io/v1beta1",
resourceName: "certificatesigningrequests",
resourceNamespaced: false,
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("a"),
expectedListedNamespaces: []string{""},
labelSelector: "a=b",
lists: []string{
`{
"apiVersion": "certifiaces.k8s.io/v1beta1",
"kind": "CertificateSigningRequestList",
"items": [
{
"metadata": {
"name": "1",
"labels": {
"a": "b"
}
}
}
]
}`,
},
},
{
name: "use a custom action",
resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
resourceGroup: "certificates.k8s.io",
resourceVersion: "v1beta1",
resourceGV: "certificates.k8s.io/v1beta1",
resourceName: "certificatesigningrequests",
resourceNamespaced: false,
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("a"),
expectedListedNamespaces: []string{""},
labelSelector: "a=b",
lists: []string{
`{
"apiVersion": "certificates.k8s.io/v1beta1",
"kind": "CertificateSigningRequestList",
"items": [
{
"metadata": {
"name": "1",
"labels": {
"a": "b"
}
}
}
]
}`,
},
actions: map[string]Action{
"certificatesigningrequests": &fakeAction{},
"other": &fakeAction{},
},
expectedActionIDs: map[string][]string{
"certificatesigningrequests": {"1"},
},
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var labelSelector *metav1.LabelSelector
if test.labelSelector != "" {
s, err := metav1.ParseToLabelSelector(test.labelSelector)
require.NoError(t, err)
labelSelector = s
}

log, _ := testlogger.NewNullLogger()

ctx := &backupContext{
backup: &v1.Backup{
Spec: v1.BackupSpec{
LabelSelector: labelSelector,
},
},
resourceIncludesExcludes: test.resourceIncludesExcludes,
namespaceIncludesExcludes: test.namespaceIncludesExcludes,
deploymentsBackedUp: test.deploymentsBackedUp,
networkPoliciesBackedUp: test.networkPoliciesBackedUp,
logger: log,
}

group := &metav1.APIResourceList{
GroupVersion: test.resourceGV,
}

resource := metav1.APIResource{Name: test.resourceName, Namespaced: test.resourceNamespaced}

itemBackupper := &mockItemBackupper{}

var actualActionIDs map[string][]string

dynamicFactory := &arktest.FakeDynamicFactory{}
gvr := schema.GroupVersionResource{Group: test.resourceGroup, Version: test.resourceVersion}
gr := schema.GroupResource{Group: test.resourceGroup, Resource: test.resourceName}
for i, namespace := range test.expectedListedNamespaces {
obj := toRuntimeObject(t, test.lists[i])

client := &arktest.FakeDynamicClient{}
client.On("List", metav1.ListOptions{LabelSelector: test.labelSelector}).Return(obj, nil)
dynamicFactory.On("ClientForGroupVersionResource", gvr, resource, namespace).Return(client, nil)

action := test.actions[test.resourceName]

list, err := meta.ExtractList(obj)
require.NoError(t, err)
for i := range list {
item := list[i].(*unstructured.Unstructured)
itemBackupper.On("backupItem", ctx, item, gr).Return(nil)
if action != nil {
a, err := meta.Accessor(item)
require.NoError(t, err)
ns := a.GetNamespace()
name := a.GetName()
id := ns
if id != "" {
id += "/"
}
id += name
if actualActionIDs == nil {
actualActionIDs = make(map[string][]string)
}
actualActionIDs[test.resourceName] = append(actualActionIDs[test.resourceName], id)
}
}
}

resources := map[schema.GroupVersionResource]schema.GroupVersionResource{
schema.GroupVersionResource{Resource: "certificatesigningrequests"}: schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"},
schema.GroupVersionResource{Resource: "other"}: schema.GroupVersionResource{Group: "somegroup", Version: "someversion", Resource: "otherthings"},
}
discoveryHelper := arktest.NewFakeDiscoveryHelper(false, resources)

podCommandExecutor := &arktest.PodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)

kb, err := NewKubernetesBackupper(discoveryHelper, dynamicFactory, test.actions, podCommandExecutor)
require.NoError(t, err)
backupper := kb.(*kubernetesBackupper)
backupper.itemBackupper = itemBackupper

err = backupper.backupResource(ctx, group, resource)

assert.Equal(t, test.expectedDeploymentsBackedUp, ctx.deploymentsBackedUp)
assert.Equal(t, test.expectedNetworkPoliciesBackedUp, ctx.networkPoliciesBackedUp)
assert.Equal(t, test.expectedActionIDs, actualActionIDs)
})
}
}
*/

@@ -1,121 +0,0 @@
-/*
-Copyright 2017 Heptio Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package backup
-
-import (
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-
-	"k8s.io/apimachinery/pkg/api/meta"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/clock"
-
-	api "github.com/heptio/ark/pkg/apis/ark/v1"
-	"github.com/heptio/ark/pkg/cloudprovider"
-	kubeutil "github.com/heptio/ark/pkg/util/kube"
-)
-
-// zoneLabel is the label that stores availability-zone info
-// on PVs
-const zoneLabel = "failure-domain.beta.kubernetes.io/zone"
-
-// volumeSnapshotAction is a struct that knows how to take snapshots of PersistentVolumes
-// that are backed by compatible cloud volumes.
-type volumeSnapshotAction struct {
-	snapshotService cloudprovider.SnapshotService
-	clock           clock.Clock
-}
-
-func NewVolumeSnapshotAction(snapshotService cloudprovider.SnapshotService) (Action, error) {
-	if snapshotService == nil {
-		return nil, errors.New("snapshotService cannot be nil")
-	}
-
-	return &volumeSnapshotAction{
-		snapshotService: snapshotService,
-		clock:           clock.RealClock{},
-	}, nil
-}
-
-// Execute triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided
-// backup has volume snapshots enabled and the PV is of a compatible type. Also records cloud
-// disk type and IOPS (if applicable) to be able to restore to current state later.
-func (a *volumeSnapshotAction) Execute(log *logrus.Entry, item runtime.Unstructured, backup *api.Backup) ([]ResourceIdentifier, error) {
-	var noAdditionalItems []ResourceIdentifier
-
-	log.Info("Executing volumeSnapshotAction")
-
-	if backup.Spec.SnapshotVolumes != nil && !*backup.Spec.SnapshotVolumes {
-		log.Info("Backup has volume snapshots disabled; skipping volume snapshot action.")
-		return noAdditionalItems, nil
-	}
-
-	metadata, err := meta.Accessor(item)
-	if err != nil {
-		return noAdditionalItems, errors.WithStack(err)
-	}
-
-	name := metadata.GetName()
-	var pvFailureDomainZone string
-	labels := metadata.GetLabels()
-
-	if labels[zoneLabel] != "" {
-		pvFailureDomainZone = labels[zoneLabel]
-	} else {
-		log.Infof("label %q is not present on PersistentVolume", zoneLabel)
-	}
-
-	volumeID, err := kubeutil.GetVolumeID(item.UnstructuredContent())
-	// non-nil error means it's a supported PV source but volume ID can't be found
-	if err != nil {
-		return noAdditionalItems, errors.Wrapf(err, "error getting volume ID for PersistentVolume")
-	}
-	// no volumeID / nil error means unsupported PV source
-	if volumeID == "" {
-		log.Info("PersistentVolume is not a supported volume type for snapshots, skipping.")
-		return noAdditionalItems, nil
-	}
-
-	log = log.WithField("volumeID", volumeID)
-
-	log.Info("Snapshotting PersistentVolume")
-	snapshotID, err := a.snapshotService.CreateSnapshot(volumeID, pvFailureDomainZone)
-	if err != nil {
-		// log+error on purpose - log goes to the per-backup log file, error goes to the backup
-		log.WithError(err).Error("error creating snapshot")
-		return noAdditionalItems, errors.WithMessage(err, "error creating snapshot")
-	}
-
-	volumeType, iops, err := a.snapshotService.GetVolumeInfo(volumeID, pvFailureDomainZone)
-	if err != nil {
-		log.WithError(err).Error("error getting volume info")
-		return noAdditionalItems, errors.WithMessage(err, "error getting volume info")
-	}
-
-	if backup.Status.VolumeBackups == nil {
-		backup.Status.VolumeBackups = make(map[string]*api.VolumeBackupInfo)
-	}
-
-	backup.Status.VolumeBackups[name] = &api.VolumeBackupInfo{
-		SnapshotID:       snapshotID,
-		Type:             volumeType,
-		Iops:             iops,
-		AvailabilityZone: pvFailureDomainZone,
-	}
-
-	return noAdditionalItems, nil
-}
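The deleted action above reads the availability zone off the PV's `failure-domain.beta.kubernetes.io/zone` label before snapshotting. Below is a minimal standalone sketch of just that lookup, using only apimachinery; the PV literal and zone value are made-up fixtures, not code from this commit:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// A PV roughly as the action receives it: an unstructured object.
	pv := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "PersistentVolume",
		"metadata": map[string]interface{}{
			"name": "mypv",
			"labels": map[string]interface{}{
				"failure-domain.beta.kubernetes.io/zone": "us-east-1c",
			},
		},
	}}

	metadata, err := meta.Accessor(pv)
	if err != nil {
		panic(err)
	}

	// Same lookup the action performs before calling CreateSnapshot.
	zone := metadata.GetLabels()["failure-domain.beta.kubernetes.io/zone"]
	fmt.Println(metadata.GetName(), zone) // mypv us-east-1c
}
```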
@@ -1,246 +0,0 @@
-/*
-Copyright 2017 Heptio Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package backup
-
-import (
-	"reflect"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	"k8s.io/apimachinery/pkg/util/clock"
-
-	"github.com/heptio/ark/pkg/apis/ark/v1"
-	arktest "github.com/heptio/ark/pkg/util/test"
-)
-
-func TestVolumeSnapshotAction(t *testing.T) {
-	iops := int64(1000)
-
-	tests := []struct {
-		name                   string
-		snapshotEnabled        bool
-		pv                     string
-		ttl                    time.Duration
-		expectError            bool
-		expectedVolumeID       string
-		expectedSnapshotsTaken int
-		existingVolumeBackups  map[string]*v1.VolumeBackupInfo
-		volumeInfo             map[string]v1.VolumeBackupInfo
-	}{
-		{
-			name:            "snapshot disabled",
-			pv:              `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}}`,
-			snapshotEnabled: false,
-		},
-		{
-			name:            "can't find volume id - missing spec",
-			snapshotEnabled: true,
-			pv:              `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}}`,
-			expectError:     true,
-		},
-		{
-			name:            "unsupported PV source type",
-			snapshotEnabled: true,
-			pv:              `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"unsupportedPVSource": {}}}`,
-			expectError:     false,
-		},
-		{
-			name:            "can't find volume id - aws but no volume id",
-			snapshotEnabled: true,
-			pv:              `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"awsElasticBlockStore": {}}}`,
-			expectError:     true,
-		},
-		{
-			name:            "can't find volume id - gce but no volume id",
-			snapshotEnabled: true,
-			pv:              `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"gcePersistentDisk": {}}}`,
-			expectError:     true,
-		},
-		{
-			name:                   "aws - simple volume id",
-			snapshotEnabled:        true,
-			pv:                     `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`,
-			expectError:            false,
-			expectedSnapshotsTaken: 1,
-			expectedVolumeID:       "vol-abc123",
-			ttl:                    5 * time.Minute,
-			volumeInfo: map[string]v1.VolumeBackupInfo{
-				"vol-abc123": v1.VolumeBackupInfo{Type: "gp", SnapshotID: "snap-1", AvailabilityZone: "us-east-1c"},
-			},
-		},
-		{
-			name:                   "aws - simple volume id with provisioned IOPS",
-			snapshotEnabled:        true,
-			pv:                     `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`,
-			expectError:            false,
-			expectedSnapshotsTaken: 1,
-			expectedVolumeID:       "vol-abc123",
-			ttl:                    5 * time.Minute,
-			volumeInfo: map[string]v1.VolumeBackupInfo{
-				"vol-abc123": v1.VolumeBackupInfo{Type: "io1", Iops: &iops, SnapshotID: "snap-1", AvailabilityZone: "us-east-1c"},
-			},
-		},
-		{
-			name:                   "aws - dynamically provisioned volume id",
-			snapshotEnabled:        true,
-			pv:                     `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-west-2a"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-west-2a/vol-abc123"}}}`,
-			expectError:            false,
-			expectedSnapshotsTaken: 1,
-			expectedVolumeID:       "vol-abc123",
-			ttl:                    5 * time.Minute,
-			volumeInfo: map[string]v1.VolumeBackupInfo{
-				"vol-abc123": v1.VolumeBackupInfo{Type: "gp", SnapshotID: "snap-1", AvailabilityZone: "us-west-2a"},
-			},
-		},
-		{
-			name:                   "gce",
-			snapshotEnabled:        true,
-			pv:                     `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "gcp-zone2"}}, "spec": {"gcePersistentDisk": {"pdName": "pd-abc123"}}}`,
-			expectError:            false,
-			expectedSnapshotsTaken: 1,
-			expectedVolumeID:       "pd-abc123",
-			ttl:                    5 * time.Minute,
-			volumeInfo: map[string]v1.VolumeBackupInfo{
-				"pd-abc123": v1.VolumeBackupInfo{Type: "gp", SnapshotID: "snap-1", AvailabilityZone: "gcp-zone2"},
-			},
-		},
-		{
-			name:                   "azure",
-			snapshotEnabled:        true,
-			pv:                     `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"azureDisk": {"diskName": "foo-disk"}}}`,
-			expectError:            false,
-			expectedSnapshotsTaken: 1,
-			expectedVolumeID:       "foo-disk",
-			ttl:                    5 * time.Minute,
-			volumeInfo: map[string]v1.VolumeBackupInfo{
-				"foo-disk": v1.VolumeBackupInfo{Type: "gp", SnapshotID: "snap-1"},
-			},
-		},
-		{
-			name:                   "preexisting volume backup info in backup status",
-			snapshotEnabled:        true,
-			pv:                     `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"gcePersistentDisk": {"pdName": "pd-abc123"}}}`,
-			expectError:            false,
-			expectedSnapshotsTaken: 1,
-			expectedVolumeID:       "pd-abc123",
-			ttl:                    5 * time.Minute,
-			existingVolumeBackups: map[string]*v1.VolumeBackupInfo{
-				"anotherpv": &v1.VolumeBackupInfo{SnapshotID: "anothersnap"},
-			},
-			volumeInfo: map[string]v1.VolumeBackupInfo{
-				"pd-abc123": v1.VolumeBackupInfo{Type: "gp", SnapshotID: "snap-1"},
-			},
-		},
-		{
-			name:            "create snapshot error",
-			snapshotEnabled: true,
-			pv:              `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"gcePersistentDisk": {"pdName": "pd-abc123"}}}`,
-			expectError:     true,
-		},
-		{
-			name:                   "PV with label metadata but no failureDomainZone",
-			snapshotEnabled:        true,
-			pv:                     `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/region": "us-east-1"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`,
-			expectError:            false,
-			expectedSnapshotsTaken: 1,
-			expectedVolumeID:       "vol-abc123",
-			ttl:                    5 * time.Minute,
-			volumeInfo: map[string]v1.VolumeBackupInfo{
-				"vol-abc123": v1.VolumeBackupInfo{Type: "gp", SnapshotID: "snap-1"},
-			},
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			backup := &v1.Backup{
-				ObjectMeta: metav1.ObjectMeta{
-					Namespace: v1.DefaultNamespace,
-					Name:      "mybackup",
-				},
-				Spec: v1.BackupSpec{
-					SnapshotVolumes: &test.snapshotEnabled,
-					TTL:             metav1.Duration{Duration: test.ttl},
-				},
-				Status: v1.BackupStatus{
-					VolumeBackups: test.existingVolumeBackups,
-				},
-			}
-
-			snapshotService := &arktest.FakeSnapshotService{SnapshottableVolumes: test.volumeInfo}
-
-			vsa, _ := NewVolumeSnapshotAction(snapshotService)
-			action := vsa.(*volumeSnapshotAction)
-
-			fakeClock := clock.NewFakeClock(time.Now())
-			action.clock = fakeClock
-
-			pv, err := getAsMap(test.pv)
-			if err != nil {
-				t.Fatal(err)
-			}
-
-			// method under test
-			additionalItems, err := action.Execute(arktest.NewLogger(), &unstructured.Unstructured{Object: pv}, backup)
-			assert.Len(t, additionalItems, 0)
-
-			gotErr := err != nil
-
-			if e, a := test.expectError, gotErr; e != a {
-				t.Errorf("error: expected %v, got %v", e, a)
-			}
-			if test.expectError {
-				return
-			}
-
-			if !test.snapshotEnabled {
-				// don't need to check anything else if snapshots are disabled
-				return
-			}
-
-			expectedVolumeBackups := test.existingVolumeBackups
-			if expectedVolumeBackups == nil {
-				expectedVolumeBackups = make(map[string]*v1.VolumeBackupInfo)
-			}
-
-			// we should have one snapshot taken exactly
-			require.Equal(t, test.expectedSnapshotsTaken, snapshotService.SnapshotsTaken.Len())
-
-			if test.expectedSnapshotsTaken > 0 {
-				// the snapshotID should be the one in the entry in snapshotService.SnapshottableVolumes
-				// for the volume we ran the test for
-				snapshotID, _ := snapshotService.SnapshotsTaken.PopAny()
-
-				expectedVolumeBackups["mypv"] = &v1.VolumeBackupInfo{
-					SnapshotID:       snapshotID,
-					Type:             test.volumeInfo[test.expectedVolumeID].Type,
-					Iops:             test.volumeInfo[test.expectedVolumeID].Iops,
-					AvailabilityZone: test.volumeInfo[test.expectedVolumeID].AvailabilityZone,
-				}
-
-				if e, a := expectedVolumeBackups, backup.Status.VolumeBackups; !reflect.DeepEqual(e, a) {
-					t.Errorf("backup.status.VolumeBackups: expected %v, got %v", e, a)
-				}
-			}
-		})
-	}
-}
@@ -92,7 +92,7 @@ type Dynamic interface {

 // dynamicResourceClient implements Dynamic.
 type dynamicResourceClient struct {
-	resourceClient *dynamic.ResourceClient
+	resourceClient dynamic.ResourceInterface
 }

 var _ Dynamic = &dynamicResourceClient{}
@@ -20,16 +20,21 @@ import (
 	"github.com/pkg/errors"
 	"github.com/spf13/pflag"

-	"github.com/heptio/ark/pkg/generated/clientset"
+	"k8s.io/client-go/kubernetes"
+
+	clientset "github.com/heptio/ark/pkg/generated/clientset/versioned"
 )

-// Factory knows how to create an ArkClient.
+// Factory knows how to create an ArkClient and Kubernetes client.
 type Factory interface {
 	// BindFlags binds common flags such as --kubeconfig to the passed-in FlagSet.
 	BindFlags(flags *pflag.FlagSet)
 	// Client returns an ArkClient. It uses the following priority to specify the cluster
 	// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
 	Client() (clientset.Interface, error)
+	// KubeClient returns a Kubernetes client. It uses the following priority to specify the cluster
+	// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
+	KubeClient() (kubernetes.Interface, error)
 }

 type factory struct {
@@ -65,3 +70,16 @@ func (f *factory) Client() (clientset.Interface, error) {
 	}
 	return arkClient, nil
 }
+
+func (f *factory) KubeClient() (kubernetes.Interface, error) {
+	clientConfig, err := Config(f.kubeconfig, f.baseName)
+	if err != nil {
+		return nil, err
+	}
+
+	kubeClient, err := kubernetes.NewForConfig(clientConfig)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+	return kubeClient, nil
+}
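The new `KubeClient` method follows the standard client-go bootstrap. A rough standalone sketch of the same priority chain using plain client-go is below; the reliance on `BuildConfigFromFlags` falling back to in-cluster configuration when both arguments are empty is an assumption about clientcmd's behavior, and this is not code from the commit:

```go
package main

import (
	"fmt"
	"os"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Mirrors the documented priority: an explicit kubeconfig path first
	// (here taken from KUBECONFIG), otherwise clientcmd falls back to
	// in-cluster configuration.
	kubeconfig := os.Getenv("KUBECONFIG")

	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		panic(err)
	}

	kubeClient, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	fmt.Printf("created client: %T\n", kubeClient)
}
```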
@@ -17,19 +17,28 @@ limitations under the License.
 package aws

 import (
+	"regexp"
+
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/ec2"
 	"github.com/pkg/errors"
+
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"

 	"github.com/heptio/ark/pkg/cloudprovider"
+	"github.com/heptio/ark/pkg/util/collections"
 )

-var _ cloudprovider.BlockStorageAdapter = &blockStorageAdapter{}
+const regionKey = "region"

-type blockStorageAdapter struct {
+// iopsVolumeTypes is a set of AWS EBS volume types for which IOPS should
+// be captured during snapshot and provided when creating a new volume
+// from snapshot.
+var iopsVolumeTypes = sets.NewString("io1")
+
+type blockStore struct {
 	ec2 *ec2.EC2
 }

@@ -46,29 +55,29 @@ func getSession(config *aws.Config) (*session.Session, error) {
 	return sess, nil
 }

-func NewBlockStorageAdapter(region string) (cloudprovider.BlockStorageAdapter, error) {
+func NewBlockStore() cloudprovider.BlockStore {
+	return &blockStore{}
+}
+
+func (b *blockStore) Init(config map[string]string) error {
+	region := config[regionKey]
 	if region == "" {
-		return nil, errors.New("missing region in aws configuration in config file")
+		return errors.Errorf("missing %s in aws configuration", regionKey)
 	}

 	awsConfig := aws.NewConfig().WithRegion(region)

 	sess, err := getSession(awsConfig)
 	if err != nil {
-		return nil, err
+		return err
 	}

-	return &blockStorageAdapter{
-		ec2: ec2.New(sess),
-	}, nil
+	b.ec2 = ec2.New(sess)
+
+	return nil
 }

-// iopsVolumeTypes is a set of AWS EBS volume types for which IOPS should
-// be captured during snapshot and provided when creating a new volume
-// from snapshot.
-var iopsVolumeTypes = sets.NewString("io1")
-
-func (op *blockStorageAdapter) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (volumeID string, err error) {
+func (b *blockStore) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (volumeID string, err error) {
 	req := &ec2.CreateVolumeInput{
 		SnapshotId:       &snapshotID,
 		AvailabilityZone: &volumeAZ,
@@ -79,7 +88,7 @@ func (op *blockStorageAdapter) CreateVolumeFromSnapshot(snapshotID, volumeType,
 		req.Iops = iops
 	}

-	res, err := op.ec2.CreateVolume(req)
+	res, err := b.ec2.CreateVolume(req)
 	if err != nil {
 		return "", errors.WithStack(err)
 	}
@@ -87,12 +96,12 @@ func (op *blockStorageAdapter) CreateVolumeFromSnapshot(snapshotID, volumeType,
 	return *res.VolumeId, nil
 }

-func (op *blockStorageAdapter) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) {
+func (b *blockStore) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) {
 	req := &ec2.DescribeVolumesInput{
 		VolumeIds: []*string{&volumeID},
 	}

-	res, err := op.ec2.DescribeVolumes(req)
+	res, err := b.ec2.DescribeVolumes(req)
 	if err != nil {
 		return "", nil, errors.WithStack(err)
 	}
@@ -119,12 +128,12 @@ func (op *blockStorageAdapter) GetVolumeInfo(volumeID, volumeAZ string) (string,
 	return volumeType, iops, nil
 }

-func (op *blockStorageAdapter) IsVolumeReady(volumeID, volumeAZ string) (ready bool, err error) {
+func (b *blockStore) IsVolumeReady(volumeID, volumeAZ string) (ready bool, err error) {
 	req := &ec2.DescribeVolumesInput{
 		VolumeIds: []*string{&volumeID},
 	}

-	res, err := op.ec2.DescribeVolumes(req)
+	res, err := b.ec2.DescribeVolumes(req)
 	if err != nil {
 		return false, errors.WithStack(err)
 	}
@@ -135,38 +144,12 @@ func (op *blockStorageAdapter) IsVolumeReady(volumeID, volumeAZ string) (ready b
 	return *res.Volumes[0].State == ec2.VolumeStateAvailable, nil
 }

-func (op *blockStorageAdapter) ListSnapshots(tagFilters map[string]string) ([]string, error) {
-	req := &ec2.DescribeSnapshotsInput{}
-
-	for k, v := range tagFilters {
-		filter := &ec2.Filter{}
-		filter.SetName(k)
-		filter.SetValues([]*string{&v})
-
-		req.Filters = append(req.Filters, filter)
-	}
-
-	var ret []string
-	err := op.ec2.DescribeSnapshotsPages(req, func(res *ec2.DescribeSnapshotsOutput, lastPage bool) bool {
-		for _, snapshot := range res.Snapshots {
-			ret = append(ret, *snapshot.SnapshotId)
-		}
-
-		return !lastPage
-	})
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-
-	return ret, nil
-}
-
-func (op *blockStorageAdapter) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (string, error) {
+func (b *blockStore) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (string, error) {
 	req := &ec2.CreateSnapshotInput{
 		VolumeId: &volumeID,
 	}

-	res, err := op.ec2.CreateSnapshot(req)
+	res, err := b.ec2.CreateSnapshot(req)
 	if err != nil {
 		return "", errors.WithStack(err)
 	}
@@ -186,17 +169,43 @@ func (op *blockStorageAdapter) CreateSnapshot(volumeID, volumeAZ string, tags ma

 	tagsReq.SetTags(ec2Tags)

-	_, err = op.ec2.CreateTags(tagsReq)
+	_, err = b.ec2.CreateTags(tagsReq)

 	return *res.SnapshotId, errors.WithStack(err)
 }

-func (op *blockStorageAdapter) DeleteSnapshot(snapshotID string) error {
+func (b *blockStore) DeleteSnapshot(snapshotID string) error {
 	req := &ec2.DeleteSnapshotInput{
 		SnapshotId: &snapshotID,
 	}

-	_, err := op.ec2.DeleteSnapshot(req)
+	_, err := b.ec2.DeleteSnapshot(req)

 	return errors.WithStack(err)
 }
+
+var ebsVolumeIDRegex = regexp.MustCompile("vol-.*")
+
+func (b *blockStore) GetVolumeID(pv runtime.Unstructured) (string, error) {
+	if !collections.Exists(pv.UnstructuredContent(), "spec.awsElasticBlockStore") {
+		return "", nil
+	}
+
+	volumeID, err := collections.GetString(pv.UnstructuredContent(), "spec.awsElasticBlockStore.volumeID")
+	if err != nil {
+		return "", err
+	}
+
+	return ebsVolumeIDRegex.FindString(volumeID), nil
+}
+
+func (b *blockStore) SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error) {
+	aws, err := collections.GetMap(pv.UnstructuredContent(), "spec.awsElasticBlockStore")
+	if err != nil {
+		return nil, err
+	}
+
+	aws["volumeID"] = volumeID
+
+	return pv, nil
+}
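The new `GetVolumeID` uses `ebsVolumeIDRegex` to pull the `vol-` suffix out of the `aws://zone/vol-...` URL form that dynamically provisioned PVs carry. A minimal sketch of just that extraction, with made-up inputs:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern the block store uses.
	ebsVolumeIDRegex := regexp.MustCompile("vol-.*")

	for _, id := range []string{"aws://us-east-1c/vol-abc123", "vol-abc123", "foo"} {
		// FindString returns "" on a miss, which GetVolumeID treats as
		// "not an EBS-backed PV".
		fmt.Printf("%q -> %q\n", id, ebsVolumeIDRegex.FindString(id))
	}
}
```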
pkg/cloudprovider/aws/block_store_test.go (new file, 86 lines)
@@ -0,0 +1,86 @@
+/*
+Copyright 2017 the Heptio Ark contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package aws
+
+import (
+	"testing"
+
+	"github.com/heptio/ark/pkg/util/collections"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+func TestGetVolumeID(t *testing.T) {
+	b := &blockStore{}
+
+	pv := &unstructured.Unstructured{}
+
+	// missing spec.awsElasticBlockStore -> no error
+	volumeID, err := b.GetVolumeID(pv)
+	require.NoError(t, err)
+	assert.Equal(t, "", volumeID)
+
+	// missing spec.awsElasticBlockStore.volumeID -> error
+	aws := map[string]interface{}{}
+	pv.Object["spec"] = map[string]interface{}{
+		"awsElasticBlockStore": aws,
+	}
+	volumeID, err = b.GetVolumeID(pv)
+	assert.Error(t, err)
+	assert.Equal(t, "", volumeID)
+
+	// regex miss
+	aws["volumeID"] = "foo"
+	volumeID, err = b.GetVolumeID(pv)
+	assert.NoError(t, err)
+	assert.Equal(t, "", volumeID)
+
+	// regex match 1
+	aws["volumeID"] = "aws://us-east-1c/vol-abc123"
+	volumeID, err = b.GetVolumeID(pv)
+	assert.NoError(t, err)
+	assert.Equal(t, "vol-abc123", volumeID)
+
+	// regex match 2
+	aws["volumeID"] = "vol-abc123"
+	volumeID, err = b.GetVolumeID(pv)
+	assert.NoError(t, err)
+	assert.Equal(t, "vol-abc123", volumeID)
+}
+
+func TestSetVolumeID(t *testing.T) {
+	b := &blockStore{}
+
+	pv := &unstructured.Unstructured{}
+
+	// missing spec.awsElasticBlockStore -> error
+	updatedPV, err := b.SetVolumeID(pv, "vol-updated")
+	require.Error(t, err)
+
+	// happy path
+	aws := map[string]interface{}{}
+	pv.Object["spec"] = map[string]interface{}{
+		"awsElasticBlockStore": aws,
+	}
+	updatedPV, err = b.SetVolumeID(pv, "vol-updated")
+	require.NoError(t, err)
+	actual, err := collections.GetString(updatedPV.UnstructuredContent(), "spec.awsElasticBlockStore.volumeID")
+	require.NoError(t, err)
+	assert.Equal(t, "vol-updated", actual)
+}
@@ -18,26 +18,52 @@ package aws

 import (
 	"io"
+	"strconv"
 	"time"

 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/endpoints"
 	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3manager"
 	"github.com/pkg/errors"

 	"github.com/heptio/ark/pkg/cloudprovider"
 )

-var _ cloudprovider.ObjectStorageAdapter = &objectStorageAdapter{}
+const (
+	s3URLKey            = "s3Url"
+	kmsKeyIDKey         = "kmsKeyId"
+	s3ForcePathStyleKey = "s3ForcePathStyle"
+)

-type objectStorageAdapter struct {
-	s3       *s3.S3
-	kmsKeyID string
+type objectStore struct {
+	s3         *s3.S3
+	s3Uploader *s3manager.Uploader
+	kmsKeyID   string
 }

-func NewObjectStorageAdapter(region, s3URL, kmsKeyID string, s3ForcePathStyle bool) (cloudprovider.ObjectStorageAdapter, error) {
+func NewObjectStore() cloudprovider.ObjectStore {
+	return &objectStore{}
+}
+
+func (o *objectStore) Init(config map[string]string) error {
+	var (
+		region              = config[regionKey]
+		s3URL               = config[s3URLKey]
+		kmsKeyID            = config[kmsKeyIDKey]
+		s3ForcePathStyleVal = config[s3ForcePathStyleKey]
+		s3ForcePathStyle    bool
+		err                 error
+	)
+
 	if region == "" {
-		return nil, errors.New("missing region in aws configuration in config file")
+		return errors.Errorf("missing %s in aws configuration", regionKey)
 	}

+	if s3ForcePathStyleVal != "" {
+		if s3ForcePathStyle, err = strconv.ParseBool(s3ForcePathStyleVal); err != nil {
+			return errors.Wrapf(err, "could not parse %s (expected bool)", s3ForcePathStyleKey)
+		}
+	}
+
 	awsConfig := aws.NewConfig().
@@ -60,40 +86,41 @@ func NewObjectStorageAdapter(region, s3URL, kmsKeyID string, s3ForcePathStyle bo

 	sess, err := getSession(awsConfig)
 	if err != nil {
-		return nil, err
+		return err
 	}

-	return &objectStorageAdapter{
-		s3:       s3.New(sess),
-		kmsKeyID: kmsKeyID,
-	}, nil
+	o.s3 = s3.New(sess)
+	o.s3Uploader = s3manager.NewUploader(sess)
+	o.kmsKeyID = kmsKeyID
+
+	return nil
 }

-func (op *objectStorageAdapter) PutObject(bucket string, key string, body io.ReadSeeker) error {
-	req := &s3.PutObjectInput{
+func (o *objectStore) PutObject(bucket string, key string, body io.Reader) error {
+	req := &s3manager.UploadInput{
 		Bucket: &bucket,
 		Key:    &key,
 		Body:   body,
 	}

 	// if kmsKeyID is not empty, enable "aws:kms" encryption
-	if op.kmsKeyID != "" {
+	if o.kmsKeyID != "" {
 		req.ServerSideEncryption = aws.String("aws:kms")
-		req.SSEKMSKeyId = &op.kmsKeyID
+		req.SSEKMSKeyId = &o.kmsKeyID
 	}

-	_, err := op.s3.PutObject(req)
+	_, err := o.s3Uploader.Upload(req)

 	return errors.Wrapf(err, "error putting object %s", key)
 }

-func (op *objectStorageAdapter) GetObject(bucket string, key string) (io.ReadCloser, error) {
+func (o *objectStore) GetObject(bucket string, key string) (io.ReadCloser, error) {
 	req := &s3.GetObjectInput{
 		Bucket: &bucket,
 		Key:    &key,
 	}

-	res, err := op.s3.GetObject(req)
+	res, err := o.s3.GetObject(req)
 	if err != nil {
 		return nil, errors.Wrapf(err, "error getting object %s", key)
 	}
@@ -101,14 +128,14 @@ func (op *objectStorageAdapter) GetObject(bucket string, key string) (io.ReadClo
 	return res.Body, nil
 }

-func (op *objectStorageAdapter) ListCommonPrefixes(bucket string, delimiter string) ([]string, error) {
+func (o *objectStore) ListCommonPrefixes(bucket string, delimiter string) ([]string, error) {
 	req := &s3.ListObjectsV2Input{
 		Bucket:    &bucket,
 		Delimiter: &delimiter,
 	}

 	var ret []string
-	err := op.s3.ListObjectsV2Pages(req, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+	err := o.s3.ListObjectsV2Pages(req, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
 		for _, prefix := range page.CommonPrefixes {
 			ret = append(ret, *prefix.Prefix)
 		}
@@ -122,14 +149,14 @@ func (op *objectStorageAdapter) ListCommonPrefixes(bucket string, delimiter stri
 	return ret, nil
 }

-func (op *objectStorageAdapter) ListObjects(bucket, prefix string) ([]string, error) {
+func (o *objectStore) ListObjects(bucket, prefix string) ([]string, error) {
 	req := &s3.ListObjectsV2Input{
 		Bucket: &bucket,
 		Prefix: &prefix,
 	}

 	var ret []string
-	err := op.s3.ListObjectsV2Pages(req, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+	err := o.s3.ListObjectsV2Pages(req, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
 		for _, obj := range page.Contents {
 			ret = append(ret, *obj.Key)
 		}
@@ -143,19 +170,19 @@ func (op *objectStorageAdapter) ListObjects(bucket, prefix string) ([]string, er
 	return ret, nil
 }

-func (op *objectStorageAdapter) DeleteObject(bucket string, key string) error {
+func (o *objectStore) DeleteObject(bucket string, key string) error {
 	req := &s3.DeleteObjectInput{
 		Bucket: &bucket,
 		Key:    &key,
 	}

-	_, err := op.s3.DeleteObject(req)
+	_, err := o.s3.DeleteObject(req)

 	return errors.Wrapf(err, "error deleting object %s", key)
 }

-func (op *objectStorageAdapter) CreateSignedURL(bucket, key string, ttl time.Duration) (string, error) {
-	req, _ := op.s3.GetObjectRequest(&s3.GetObjectInput{
+func (o *objectStore) CreateSignedURL(bucket, key string, ttl time.Duration) (string, error) {
+	req, _ := o.s3.GetObjectRequest(&s3.GetObjectInput{
 		Bucket: aws.String(bucket),
 		Key:    aws.String(key),
 	})
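The switch from `s3.PutObject` (which needs an `io.ReadSeeker`) to `s3manager.Uploader` (which accepts any `io.Reader`) is what lets the upload interfaces above relax to `io.Reader`: the uploader chunks the stream internally instead of seeking to compute a length. A minimal sketch of that call shape, assuming aws-sdk-go v1 and placeholder bucket/key names:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess, err := session.NewSession(aws.NewConfig().WithRegion("us-east-1"))
	if err != nil {
		panic(err)
	}

	uploader := s3manager.NewUploader(sess)

	// Any io.Reader works; no Seek is needed, so gzip streams and pipes
	// can be uploaded directly. Bucket and key here are placeholders.
	_, err = uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String("my-ark-bucket"),
		Key:    aws.String("mybackup/mybackup.tar.gz"),
		Body:   strings.NewReader("example payload"),
	})
	fmt.Println(err)
}
```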
@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"
 	"os"
+	"strings"
 	"time"

 	"github.com/Azure/azure-sdk-for-go/arm/disk"
@@ -29,21 +30,12 @@ import (
 	"github.com/Azure/go-autorest/autorest/azure"
 	"github.com/pkg/errors"
 	"github.com/satori/uuid"
+	"k8s.io/apimachinery/pkg/runtime"

 	"github.com/heptio/ark/pkg/cloudprovider"
+	"github.com/heptio/ark/pkg/util/collections"
 )

-type blockStorageAdapter struct {
-	disks         *disk.DisksClient
-	snaps         *disk.SnapshotsClient
-	subscription  string
-	resourceGroup string
-	location      string
-	apiTimeout    time.Duration
-}
-
-var _ cloudprovider.BlockStorageAdapter = &blockStorageAdapter{}
-
 const (
 	azureClientIDKey     string = "AZURE_CLIENT_ID"
 	azureClientSecretKey string = "AZURE_CLIENT_SECRET"
@@ -52,8 +44,20 @@ const (
 	azureStorageAccountIDKey string = "AZURE_STORAGE_ACCOUNT_ID"
 	azureStorageKeyKey       string = "AZURE_STORAGE_KEY"
 	azureResourceGroupKey    string = "AZURE_RESOURCE_GROUP"
+
+	locationKey   = "location"
+	apiTimeoutKey = "apiTimeout"
 )

+type blockStore struct {
+	disks         *disk.DisksClient
+	snaps         *disk.SnapshotsClient
+	subscription  string
+	resourceGroup string
+	location      string
+	apiTimeout    time.Duration
+}
+
 func getConfig() map[string]string {
 	cfg := map[string]string{
 		azureClientIDKey: "",
@@ -72,9 +76,24 @@ func getConfig() map[string]string {
 	return cfg
 }

-func NewBlockStorageAdapter(location string, apiTimeout time.Duration) (cloudprovider.BlockStorageAdapter, error) {
+func NewBlockStore() cloudprovider.BlockStore {
+	return &blockStore{}
+}
+
+func (b *blockStore) Init(config map[string]string) error {
+	var (
+		location      = config[locationKey]
+		apiTimeoutVal = config[apiTimeoutKey]
+		apiTimeout    time.Duration
+		err           error
+	)
+
 	if location == "" {
-		return nil, errors.New("missing location in azure configuration in config file")
+		return errors.Errorf("missing %s in azure configuration", locationKey)
 	}

+	if apiTimeout, err = time.ParseDuration(apiTimeoutVal); err != nil {
+		return errors.Wrapf(err, "could not parse %s (expected time.Duration)", apiTimeoutKey)
+	}
+
 	if apiTimeout == 0 {
@@ -85,7 +104,7 @@ func NewBlockStorageAdapter(location string, apiTimeout time.Duration) (cloudpro

 	spt, err := helpers.NewServicePrincipalTokenFromCredentials(cfg, azure.PublicCloud.ResourceManagerEndpoint)
 	if err != nil {
-		return nil, errors.Wrap(err, "error creating new service principal token")
+		return errors.Wrap(err, "error creating new service principal token")
 	}

 	disksClient := disk.NewDisksClient(cfg[azureSubscriptionIDKey])
@@ -101,11 +120,11 @@ func NewBlockStorageAdapter(location string, apiTimeout time.Duration) (cloudpro

 	locs, err := groupClient.ListLocations(cfg[azureSubscriptionIDKey])
 	if err != nil {
-		return nil, errors.WithStack(err)
+		return errors.WithStack(err)
 	}

 	if locs.Value == nil {
-		return nil, errors.New("no locations returned from Azure API")
+		return errors.New("no locations returned from Azure API")
 	}

 	locationExists := false
@@ -117,26 +136,26 @@ func NewBlockStorageAdapter(location string, apiTimeout time.Duration) (cloudpro
 	}

 	if !locationExists {
-		return nil, errors.Errorf("location %q not found", location)
+		return errors.Errorf("location %q not found", location)
 	}

-	return &blockStorageAdapter{
-		disks:         &disksClient,
-		snaps:         &snapsClient,
-		subscription:  cfg[azureSubscriptionIDKey],
-		resourceGroup: cfg[azureResourceGroupKey],
-		location:      location,
-		apiTimeout:    apiTimeout,
-	}, nil
+	b.disks = &disksClient
+	b.snaps = &snapsClient
+	b.subscription = cfg[azureSubscriptionIDKey]
+	b.resourceGroup = cfg[azureResourceGroupKey]
+	b.location = location
+	b.apiTimeout = apiTimeout
+
+	return nil
 }

-func (op *blockStorageAdapter) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (string, error) {
-	fullSnapshotName := getFullSnapshotName(op.subscription, op.resourceGroup, snapshotID)
+func (b *blockStore) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (string, error) {
+	fullSnapshotName := getFullSnapshotName(b.subscription, b.resourceGroup, snapshotID)
 	diskName := "restore-" + uuid.NewV4().String()

 	disk := disk.Model{
 		Name:     &diskName,
-		Location: &op.location,
+		Location: &b.location,
 		Properties: &disk.Properties{
 			CreationData: &disk.CreationData{
 				CreateOption: disk.Copy,
@@ -146,10 +165,10 @@ func (op *blockStorageAdapter) CreateVolumeFromSnapshot(snapshotID, volumeType,
 		},
 	}

-	ctx, cancel := context.WithTimeout(context.Background(), op.apiTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), b.apiTimeout)
 	defer cancel()

-	_, errChan := op.disks.CreateOrUpdate(op.resourceGroup, *disk.Name, disk, ctx.Done())
+	_, errChan := b.disks.CreateOrUpdate(b.resourceGroup, *disk.Name, disk, ctx.Done())

 	err := <-errChan

@@ -159,8 +178,8 @@ func (op *blockStorageAdapter) CreateVolumeFromSnapshot(snapshotID, volumeType,
 	return diskName, nil
 }

-func (op *blockStorageAdapter) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) {
-	res, err := op.disks.Get(op.resourceGroup, volumeID)
+func (b *blockStore) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) {
+	res, err := b.disks.Get(b.resourceGroup, volumeID)
 	if err != nil {
 		return "", nil, errors.WithStack(err)
 	}
@@ -168,8 +187,8 @@ func (op *blockStorageAdapter) GetVolumeInfo(volumeID, volumeAZ string) (string,
 	return string(res.AccountType), nil, nil
 }

-func (op *blockStorageAdapter) IsVolumeReady(volumeID, volumeAZ string) (ready bool, err error) {
-	res, err := op.disks.Get(op.resourceGroup, volumeID)
+func (b *blockStore) IsVolumeReady(volumeID, volumeAZ string) (ready bool, err error) {
+	res, err := b.disks.Get(b.resourceGroup, volumeID)
 	if err != nil {
 		return false, errors.WithStack(err)
 	}
@@ -181,42 +200,8 @@ func (op *blockStorageAdapter) IsVolumeReady(volumeID, volumeAZ string) (ready b
 	return *res.ProvisioningState == "Succeeded", nil
 }

-func (op *blockStorageAdapter) ListSnapshots(tagFilters map[string]string) ([]string, error) {
-	res, err := op.snaps.ListByResourceGroup(op.resourceGroup)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-
-	if res.Value == nil {
-		return nil, errors.New("nil Value returned from ListByResourceGroup call")
-	}
-
-	ret := make([]string, 0, len(*res.Value))
-Snapshot:
-	for _, snap := range *res.Value {
-		if snap.Tags == nil && len(tagFilters) > 0 {
-			continue
-		}
-		if snap.ID == nil {
-			continue
-		}
-
-		// Azure doesn't offer tag-filtering through the API so we have to manually
-		// filter results. Require all filter keys to be present, with matching vals.
-		for filterKey, filterVal := range tagFilters {
-			if val, ok := (*snap.Tags)[filterKey]; !ok || val == nil || *val != filterVal {
-				continue Snapshot
-			}
-		}
-
-		ret = append(ret, *snap.Name)
-	}
-
-	return ret, nil
-}
-
-func (op *blockStorageAdapter) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (string, error) {
-	fullDiskName := getFullDiskName(op.subscription, op.resourceGroup, volumeID)
+func (b *blockStore) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (string, error) {
+	fullDiskName := getFullDiskName(b.subscription, b.resourceGroup, volumeID)
 	// snapshot names must be <= 80 characters long
 	var snapshotName string
 	suffix := "-" + uuid.NewV4().String()
@@ -236,7 +221,7 @@ func (op *blockStorageAdapter) CreateSnapshot(volumeID, volumeAZ string, tags ma
 			},
 		},
 		Tags:     &map[string]*string{},
-		Location: &op.location,
+		Location: &b.location,
 	}

 	for k, v := range tags {
@@ -244,10 +229,10 @@ func (op *blockStorageAdapter) CreateSnapshot(volumeID, volumeAZ string, tags ma
 		(*snap.Tags)[k] = &val
 	}

-	ctx, cancel := context.WithTimeout(context.Background(), op.apiTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), b.apiTimeout)
 	defer cancel()

-	_, errChan := op.snaps.CreateOrUpdate(op.resourceGroup, *snap.Name, snap, ctx.Done())
+	_, errChan := b.snaps.CreateOrUpdate(b.resourceGroup, *snap.Name, snap, ctx.Done())

 	err := <-errChan

@@ -258,11 +243,11 @@ func (op *blockStorageAdapter) CreateSnapshot(volumeID, volumeAZ string, tags ma
 	return snapshotName, nil
 }

-func (op *blockStorageAdapter) DeleteSnapshot(snapshotID string) error {
-	ctx, cancel := context.WithTimeout(context.Background(), op.apiTimeout)
+func (b *blockStore) DeleteSnapshot(snapshotID string) error {
+	ctx, cancel := context.WithTimeout(context.Background(), b.apiTimeout)
 	defer cancel()

-	_, errChan := op.snaps.Delete(op.resourceGroup, snapshotID, ctx.Done())
+	_, errChan := b.snaps.Delete(b.resourceGroup, snapshotID, ctx.Done())

 	err := <-errChan

@@ -276,3 +261,36 @@ func getFullDiskName(subscription string, resourceGroup string, diskName string)
 func getFullSnapshotName(subscription string, resourceGroup string, snapshotName string) string {
 	return fmt.Sprintf("/subscriptions/%v/resourceGroups/%v/providers/Microsoft.Compute/snapshots/%v", subscription, resourceGroup, snapshotName)
 }
+
+func (b *blockStore) GetVolumeID(pv runtime.Unstructured) (string, error) {
+	if !collections.Exists(pv.UnstructuredContent(), "spec.azureDisk") {
+		return "", nil
+	}
+
+	volumeID, err := collections.GetString(pv.UnstructuredContent(), "spec.azureDisk.diskName")
+	if err != nil {
+		return "", err
+	}
+
+	return volumeID, nil
+}
+
+func (b *blockStore) SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error) {
+	azure, err := collections.GetMap(pv.UnstructuredContent(), "spec.azureDisk")
+	if err != nil {
+		return nil, err
+	}
+
+	if uri, err := collections.GetString(azure, "diskURI"); err == nil {
+		previousVolumeID, err := collections.GetString(azure, "diskName")
+		if err != nil {
+			return nil, err
+		}
+
+		azure["diskURI"] = strings.Replace(uri, previousVolumeID, volumeID, -1)
+	}
+
+	azure["diskName"] = volumeID
+
+	return pv, nil
+}
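The Azure `SetVolumeID` has a wrinkle the AWS one doesn't: when a `diskURI` is present it must stay consistent with the new `diskName`, so the old name is substituted inside the URI. A minimal standalone sketch of that substitution, with made-up subscription/resource-group values:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// The same substitution SetVolumeID applies: keep the disk URI in sync
	// with the new disk name by replacing the old name within the URI.
	uri := "/subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/disks/old-disk"
	previous, next := "old-disk", "restore-1234"

	fmt.Println(strings.Replace(uri, previous, next, -1))
	// /subscriptions/sub/resourceGroups/rg/providers/Microsoft.Compute/disks/restore-1234
}
```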
pkg/cloudprovider/azure/block_store_test.go (new file, 86 lines)
@@ -0,0 +1,86 @@
+/*
+Copyright 2017 the Heptio Ark contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package azure
+
+import (
+	"testing"
+
+	"github.com/heptio/ark/pkg/util/collections"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+func TestGetVolumeID(t *testing.T) {
+	b := &blockStore{}
+
+	pv := &unstructured.Unstructured{}
+
+	// missing spec.azureDisk -> no error
+	volumeID, err := b.GetVolumeID(pv)
+	require.NoError(t, err)
+	assert.Equal(t, "", volumeID)
+
+	// missing spec.azureDisk.diskName -> error
+	azure := map[string]interface{}{}
+	pv.Object["spec"] = map[string]interface{}{
+		"azureDisk": azure,
+	}
+	volumeID, err = b.GetVolumeID(pv)
+	assert.Error(t, err)
+	assert.Equal(t, "", volumeID)
+
+	// valid
+	azure["diskName"] = "foo"
+	volumeID, err = b.GetVolumeID(pv)
+	assert.NoError(t, err)
+	assert.Equal(t, "foo", volumeID)
+}
+
+func TestSetVolumeID(t *testing.T) {
+	b := &blockStore{}
+
+	pv := &unstructured.Unstructured{}
+
+	// missing spec.azureDisk -> error
+	updatedPV, err := b.SetVolumeID(pv, "updated")
+	require.Error(t, err)
+
+	// happy path, no diskURI
+	azure := map[string]interface{}{}
+	pv.Object["spec"] = map[string]interface{}{
+		"azureDisk": azure,
+	}
+	updatedPV, err = b.SetVolumeID(pv, "updated")
+	require.NoError(t, err)
+	actual, err := collections.GetString(updatedPV.UnstructuredContent(), "spec.azureDisk.diskName")
+	require.NoError(t, err)
+	assert.Equal(t, "updated", actual)
+	assert.NotContains(t, azure, "diskURI")
+
+	// with diskURI
+	azure["diskURI"] = "/foo/bar/updated/blarg"
+	updatedPV, err = b.SetVolumeID(pv, "revised")
+	require.NoError(t, err)
+	actual, err = collections.GetString(updatedPV.UnstructuredContent(), "spec.azureDisk.diskName")
+	require.NoError(t, err)
+	assert.Equal(t, "revised", actual)
+	actual, err = collections.GetString(updatedPV.UnstructuredContent(), "spec.azureDisk.diskURI")
+	require.NoError(t, err)
+	assert.Equal(t, "/foo/bar/revised/blarg", actual)
+}
@@ -27,31 +27,31 @@ import (
 	"github.com/heptio/ark/pkg/cloudprovider"
 )

 // ref. https://github.com/Azure-Samples/storage-blob-go-getting-started/blob/master/storageExample.go

-type objectStorageAdapter struct {
+type objectStore struct {
 	blobClient *storage.BlobStorageClient
 }

-var _ cloudprovider.ObjectStorageAdapter = &objectStorageAdapter{}
+func NewObjectStore() cloudprovider.ObjectStore {
+	return &objectStore{}
+}

-func NewObjectStorageAdapter() (cloudprovider.ObjectStorageAdapter, error) {
+func (o *objectStore) Init(config map[string]string) error {
 	cfg := getConfig()

 	storageClient, err := storage.NewBasicClient(cfg[azureStorageAccountIDKey], cfg[azureStorageKeyKey])
 	if err != nil {
-		return nil, errors.WithStack(err)
+		return errors.WithStack(err)
 	}

 	blobClient := storageClient.GetBlobService()

-	return &objectStorageAdapter{
-		blobClient: &blobClient,
-	}, nil
+	o.blobClient = &blobClient
+
+	return nil
 }

-func (op *objectStorageAdapter) PutObject(bucket string, key string, body io.ReadSeeker) error {
-	container, err := getContainerReference(op.blobClient, bucket)
+func (o *objectStore) PutObject(bucket string, key string, body io.Reader) error {
+	container, err := getContainerReference(o.blobClient, bucket)
 	if err != nil {
 		return err
 	}
@@ -61,24 +61,11 @@ func (op *objectStorageAdapter) PutObject(bucket string, key string, body io.Rea
 		return err
 	}

-	// TODO having to seek to end/back to beginning to get
-	// length here is ugly. refactor to make this better.
-	len, err := body.Seek(0, io.SeekEnd)
-	if err != nil {
-		return errors.WithStack(err)
-	}
-
-	blob.Properties.ContentLength = len
-
-	if _, err := body.Seek(0, 0); err != nil {
-		return errors.WithStack(err)
-	}
-
 	return errors.WithStack(blob.CreateBlockBlobFromReader(body, nil))
 }

-func (op *objectStorageAdapter) GetObject(bucket string, key string) (io.ReadCloser, error) {
-	container, err := getContainerReference(op.blobClient, bucket)
+func (o *objectStore) GetObject(bucket string, key string) (io.ReadCloser, error) {
+	container, err := getContainerReference(o.blobClient, bucket)
 	if err != nil {
 		return nil, err
 	}
@@ -96,8 +83,8 @@ func (op *objectStorageAdapter) GetObject(bucket string, key string) (io.ReadClo
 	return res, nil
 }

-func (op *objectStorageAdapter) ListCommonPrefixes(bucket string, delimiter string) ([]string, error) {
-	container, err := getContainerReference(op.blobClient, bucket)
+func (o *objectStore) ListCommonPrefixes(bucket string, delimiter string) ([]string, error) {
+	container, err := getContainerReference(o.blobClient, bucket)
 	if err != nil {
 		return nil, err
 	}
@@ -121,8 +108,8 @@ func (op *objectStorageAdapter) ListCommonPrefixes(bucket string, delimiter stri
 	return ret, nil
 }

-func (op *objectStorageAdapter) ListObjects(bucket, prefix string) ([]string, error) {
-	container, err := getContainerReference(op.blobClient, bucket)
+func (o *objectStore) ListObjects(bucket, prefix string) ([]string, error) {
+	container, err := getContainerReference(o.blobClient, bucket)
 	if err != nil {
 		return nil, err
 	}
@@ -144,8 +131,8 @@ func (op *objectStorageAdapter) ListObjects(bucket, prefix string) ([]string, er
 	return ret, nil
 }

-func (op *objectStorageAdapter) DeleteObject(bucket string, key string) error {
-	container, err := getContainerReference(op.blobClient, bucket)
+func (o *objectStore) DeleteObject(bucket string, key string) error {
+	container, err := getContainerReference(o.blobClient, bucket)
 	if err != nil {
 		return err
 	}
@@ -160,8 +147,8 @@ func (op *objectStorageAdapter) DeleteObject(bucket string, key string) error {

 const sasURIReadPermission = "r"

-func (op *objectStorageAdapter) CreateSignedURL(bucket, key string, ttl time.Duration) (string, error) {
-	container, err := getContainerReference(op.blobClient, bucket)
+func (o *objectStore) CreateSignedURL(bucket, key string, ttl time.Duration) (string, error) {
+	container, err := getContainerReference(o.blobClient, bucket)
 	if err != nil {
 		return "", err
 	}
@@ -31,7 +31,7 @@ import (
    kerrors "k8s.io/apimachinery/pkg/util/errors"

    api "github.com/heptio/ark/pkg/apis/ark/v1"
-    "github.com/heptio/ark/pkg/generated/clientset/scheme"
+    "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme"
)

// BackupService contains methods for working with backups in object storage.
@@ -40,7 +40,7 @@ type BackupService interface {
    // UploadBackup uploads the specified Ark backup of a set of Kubernetes API objects, whose manifests are
    // stored in the specified file, into object storage in an Ark bucket, tagged with Ark metadata. Returns
    // an error if a problem is encountered accessing the file or performing the upload via the cloud API.
-    UploadBackup(bucket, name string, metadata, backup, log io.ReadSeeker) error
+    UploadBackup(bucket, name string, metadata, backup, log io.Reader) error

    // DownloadBackup downloads an Ark backup with the specified object key from object storage via the cloud API.
    // It returns the snapshot metadata and data (separately), or an error if a problem is encountered
@@ -58,7 +58,10 @@ type BackupService interface {
    CreateSignedURL(target api.DownloadTarget, bucket string, ttl time.Duration) (string, error)

    // UploadRestoreLog uploads the restore's log file to object storage.
-    UploadRestoreLog(bucket, backup, restore string, log io.ReadSeeker) error
+    UploadRestoreLog(bucket, backup, restore string, log io.Reader) error
+
+    // UploadRestoreResults uploads the restore's results file to object storage.
+    UploadRestoreResults(bucket, backup, restore string, results io.Reader) error
}

// BackupGetter knows how to list backups in object storage.
@@ -68,10 +71,11 @@ type BackupGetter interface {
}

const (
-    metadataFileFormatString   = "%s/ark-backup.json"
-    backupFileFormatString     = "%s/%s.tar.gz"
-    backupLogFileFormatString  = "%s/%s-logs.gz"
-    restoreLogFileFormatString = "%s/restore-%s-logs.gz"
+    metadataFileFormatString       = "%s/ark-backup.json"
+    backupFileFormatString         = "%s/%s.tar.gz"
+    backupLogFileFormatString      = "%s/%s-logs.gz"
+    restoreLogFileFormatString     = "%s/restore-%s-logs.gz"
+    restoreResultsFileFormatString = "%s/restore-%s-results.gz"
)

func getMetadataKey(backup string) string {
@@ -90,36 +94,40 @@ func getRestoreLogKey(backup, restore string) string {
    return fmt.Sprintf(restoreLogFileFormatString, backup, restore)
}

+func getRestoreResultsKey(backup, restore string) string {
+    return fmt.Sprintf(restoreResultsFileFormatString, backup, restore)
+}
+
type backupService struct {
-    objectStorage ObjectStorageAdapter
-    decoder       runtime.Decoder
-    logger        *logrus.Logger
+    objectStore ObjectStore
+    decoder     runtime.Decoder
+    logger      *logrus.Logger
}

var _ BackupService = &backupService{}
var _ BackupGetter = &backupService{}

-// NewBackupService creates a backup service using the provided object storage adapter
-func NewBackupService(objectStorage ObjectStorageAdapter, logger *logrus.Logger) BackupService {
+// NewBackupService creates a backup service using the provided object store
+func NewBackupService(objectStore ObjectStore, logger *logrus.Logger) BackupService {
    return &backupService{
-        objectStorage: objectStorage,
-        decoder:       scheme.Codecs.UniversalDecoder(api.SchemeGroupVersion),
-        logger:        logger,
+        objectStore: objectStore,
+        decoder:     scheme.Codecs.UniversalDecoder(api.SchemeGroupVersion),
+        logger:      logger,
    }
}

-func (br *backupService) UploadBackup(bucket, backupName string, metadata, backup, log io.ReadSeeker) error {
+func (br *backupService) UploadBackup(bucket, backupName string, metadata, backup, log io.Reader) error {
    // upload metadata file
    metadataKey := getMetadataKey(backupName)
-    if err := br.objectStorage.PutObject(bucket, metadataKey, metadata); err != nil {
+    if err := br.objectStore.PutObject(bucket, metadataKey, metadata); err != nil {
        // failure to upload metadata file is a hard-stop
        return err
    }

    // upload tar file
-    if err := br.objectStorage.PutObject(bucket, getBackupContentsKey(backupName), backup); err != nil {
+    if err := br.objectStore.PutObject(bucket, getBackupContentsKey(backupName), backup); err != nil {
        // try to delete the metadata file since the data upload failed
-        deleteErr := br.objectStorage.DeleteObject(bucket, metadataKey)
+        deleteErr := br.objectStore.DeleteObject(bucket, metadataKey)

        return kerrors.NewAggregate([]error{err, deleteErr})
    }
@@ -127,7 +135,7 @@ func (br *backupService) UploadBackup(bucket, backupName string, metadata, backu
    // uploading log file is best-effort; if it fails, we log the error but call the overall upload a
    // success
    logKey := getBackupLogKey(backupName)
-    if err := br.objectStorage.PutObject(bucket, logKey, log); err != nil {
+    if err := br.objectStore.PutObject(bucket, logKey, log); err != nil {
        br.logger.WithError(err).WithFields(logrus.Fields{
            "bucket": bucket,
            "key":    logKey,
@@ -138,11 +146,11 @@ func (br *backupService) UploadBackup(bucket, backupName string, metadata, backu
}

func (br *backupService) DownloadBackup(bucket, backupName string) (io.ReadCloser, error) {
-    return br.objectStorage.GetObject(bucket, getBackupContentsKey(backupName))
+    return br.objectStore.GetObject(bucket, getBackupContentsKey(backupName))
}

func (br *backupService) GetAllBackups(bucket string) ([]*api.Backup, error) {
-    prefixes, err := br.objectStorage.ListCommonPrefixes(bucket, "/")
+    prefixes, err := br.objectStore.ListCommonPrefixes(bucket, "/")
    if err != nil {
        return nil, err
    }
@@ -168,7 +176,7 @@ func (br *backupService) GetAllBackups(bucket string) ([]*api.Backup, error) {
func (br *backupService) GetBackup(bucket, name string) (*api.Backup, error) {
    key := fmt.Sprintf(metadataFileFormatString, name)

-    res, err := br.objectStorage.GetObject(bucket, key)
+    res, err := br.objectStore.GetObject(bucket, key)
    if err != nil {
        return nil, err
    }
@@ -193,7 +201,7 @@ func (br *backupService) GetBackup(bucket, name string) (*api.Backup, error) {
}

func (br *backupService) DeleteBackupDir(bucket, backupName string) error {
-    objects, err := br.objectStorage.ListObjects(bucket, backupName+"/")
+    objects, err := br.objectStore.ListObjects(bucket, backupName+"/")
    if err != nil {
        return err
    }
@@ -204,7 +212,7 @@ func (br *backupService) DeleteBackupDir(bucket, backupName string) error {
            "bucket": bucket,
            "key":    key,
        }).Debug("Trying to delete object")
-        if err := br.objectStorage.DeleteObject(bucket, key); err != nil {
+        if err := br.objectStore.DeleteObject(bucket, key); err != nil {
            errs = append(errs, err)
        }
    }
@@ -215,25 +223,37 @@ func (br *backupService) DeleteBackupDir(bucket, backupName string) error {
func (br *backupService) CreateSignedURL(target api.DownloadTarget, bucket string, ttl time.Duration) (string, error) {
    switch target.Kind {
    case api.DownloadTargetKindBackupContents:
-        return br.objectStorage.CreateSignedURL(bucket, getBackupContentsKey(target.Name), ttl)
+        return br.objectStore.CreateSignedURL(bucket, getBackupContentsKey(target.Name), ttl)
    case api.DownloadTargetKindBackupLog:
-        return br.objectStorage.CreateSignedURL(bucket, getBackupLogKey(target.Name), ttl)
+        return br.objectStore.CreateSignedURL(bucket, getBackupLogKey(target.Name), ttl)
    case api.DownloadTargetKindRestoreLog:
-        // restore name is formatted as <backup name>-<timestamp>
-        i := strings.LastIndex(target.Name, "-")
-        if i < 0 {
-            i = len(target.Name)
-        }
-        backup := target.Name[0:i]
-        return br.objectStorage.CreateSignedURL(bucket, getRestoreLogKey(backup, target.Name), ttl)
+        backup := extractBackupName(target.Name)
+        return br.objectStore.CreateSignedURL(bucket, getRestoreLogKey(backup, target.Name), ttl)
+    case api.DownloadTargetKindRestoreResults:
+        backup := extractBackupName(target.Name)
+        return br.objectStore.CreateSignedURL(bucket, getRestoreResultsKey(backup, target.Name), ttl)
    default:
        return "", errors.Errorf("unsupported download target kind %q", target.Kind)
    }
}

-func (br *backupService) UploadRestoreLog(bucket, backup, restore string, log io.ReadSeeker) error {
+func extractBackupName(s string) string {
+    // restore name is formatted as <backup name>-<timestamp>
+    i := strings.LastIndex(s, "-")
+    if i < 0 {
+        i = len(s)
+    }
+    return s[0:i]
+}
+
+func (br *backupService) UploadRestoreLog(bucket, backup, restore string, log io.Reader) error {
    key := getRestoreLogKey(backup, restore)
-    return br.objectStorage.PutObject(bucket, key, log)
+    return br.objectStore.PutObject(bucket, key, log)
}

+func (br *backupService) UploadRestoreResults(bucket, backup, restore string, results io.Reader) error {
+    key := getRestoreResultsKey(backup, restore)
+    return br.objectStore.PutObject(bucket, key, results)
+}
+
// cachedBackupService wraps a real backup service with a cache for getting cloud backups.
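The restore-specific object keys above all embed the parent backup's name, which has to be recovered from a restore name of the form `<backup name>-<timestamp>`. A small self-contained sketch of that behavior — the helper body is copied from the diff above, and the sample names mirror the test cases that follow:

```go
package main

import (
	"fmt"
	"strings"
)

// extractBackupName trims the trailing "-<timestamp>" segment from a restore
// name, as in the backupService change above.
func extractBackupName(s string) string {
	i := strings.LastIndex(s, "-")
	if i < 0 {
		i = len(s)
	}
	return s[0:i]
}

func main() {
	for _, restore := range []string{
		"b-20170913154901",                     // backup "b"
		"b-cool-20170913154901",                // backup "b-cool"
		"b-cool-20170913154901-20170913154902", // restore of a scheduled backup
	} {
		fmt.Printf("%-40s -> %s\n", restore, extractBackupName(restore))
	}
}
```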
@@ -82,7 +82,7 @@ func TestUploadBackup(t *testing.T) {
    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            var (
-                objStore   = &testutil.ObjectStorageAdapter{}
+                objStore   = &testutil.ObjectStore{}
                bucket     = "test-bucket"
                backupName = "test-backup"
                logger, _  = testlogger.NewNullLogger()
@@ -118,7 +118,7 @@ func TestUploadBackup(t *testing.T) {

func TestDownloadBackup(t *testing.T) {
    var (
-        o         = &testutil.ObjectStorageAdapter{}
+        o         = &testutil.ObjectStore{}
        bucket    = "b"
        backup    = "bak"
        logger, _ = testlogger.NewNullLogger()
@@ -158,7 +158,7 @@ func TestDeleteBackup(t *testing.T) {
        bucket    = "bucket"
        backup    = "bak"
        objects   = []string{"bak/ark-backup.json", "bak/bak.tar.gz", "bak/bak.log.gz"}
-        objStore  = &testutil.ObjectStorageAdapter{}
+        objStore  = &testutil.ObjectStore{}
        logger, _ = testlogger.NewNullLogger()
    )

@@ -201,11 +201,11 @@ func TestGetAllBackups(t *testing.T) {
                "backup-2/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-2"}}),
            },
            expectedRes: []*api.Backup{
-                &api.Backup{
+                {
                    TypeMeta:   metav1.TypeMeta{Kind: "Backup", APIVersion: "ark.heptio.com/v1"},
                    ObjectMeta: metav1.ObjectMeta{Name: "backup-1"},
                },
-                &api.Backup{
+                {
                    TypeMeta:   metav1.TypeMeta{Kind: "Backup", APIVersion: "ark.heptio.com/v1"},
                    ObjectMeta: metav1.ObjectMeta{Name: "backup-2"},
                },
@@ -218,7 +218,7 @@ func TestGetAllBackups(t *testing.T) {
                "backup-2/ark-backup.json": []byte("this is not valid backup JSON"),
            },
            expectedRes: []*api.Backup{
-                &api.Backup{
+                {
                    TypeMeta:   metav1.TypeMeta{Kind: "Backup", APIVersion: "ark.heptio.com/v1"},
                    ObjectMeta: metav1.ObjectMeta{Name: "backup-1"},
                },
@@ -230,7 +230,7 @@ func TestGetAllBackups(t *testing.T) {
        t.Run(test.name, func(t *testing.T) {
            var (
                bucket    = "bucket"
-                objStore  = &testutil.ObjectStorageAdapter{}
+                objStore  = &testutil.ObjectStore{}
                logger, _ = testlogger.NewNullLogger()
            )

@@ -304,12 +304,30 @@ func TestCreateSignedURL(t *testing.T) {
            targetName:  "b-cool-20170913154901-20170913154902",
            expectedKey: "b-cool-20170913154901/restore-b-cool-20170913154901-20170913154902-logs.gz",
        },
+        {
+            name:        "restore results - backup has no dash",
+            targetKind:  api.DownloadTargetKindRestoreResults,
+            targetName:  "b-20170913154901",
+            expectedKey: "b/restore-b-20170913154901-results.gz",
+        },
+        {
+            name:        "restore results - backup has 1 dash",
+            targetKind:  api.DownloadTargetKindRestoreResults,
+            targetName:  "b-cool-20170913154901",
+            expectedKey: "b-cool/restore-b-cool-20170913154901-results.gz",
+        },
+        {
+            name:        "restore results - backup has multiple dashes (e.g. restore of scheduled backup)",
+            targetKind:  api.DownloadTargetKindRestoreResults,
+            targetName:  "b-cool-20170913154901-20170913154902",
+            expectedKey: "b-cool-20170913154901/restore-b-cool-20170913154901-20170913154902-results.gz",
+        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            var (
-                objectStorage = &testutil.ObjectStorageAdapter{}
+                objectStorage = &testutil.ObjectStore{}
                logger, _     = testlogger.NewNullLogger()
                backupService = NewBackupService(objectStorage, logger)
            )
@@ -17,7 +17,6 @@ limitations under the License.
package gcp

import (
-    "strings"
    "time"

    "github.com/pkg/errors"
@@ -26,51 +25,59 @@ import (
    "golang.org/x/oauth2/google"
    "google.golang.org/api/compute/v0.beta"

+    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/util/wait"

    "github.com/heptio/ark/pkg/cloudprovider"
+    "github.com/heptio/ark/pkg/util/collections"
)

-type blockStorageAdapter struct {
+const projectKey = "project"
+
+type blockStore struct {
    gce     *compute.Service
    project string
}

-var _ cloudprovider.BlockStorageAdapter = &blockStorageAdapter{}
+func NewBlockStore() cloudprovider.BlockStore {
+    return &blockStore{}
+}
+
+func (b *blockStore) Init(config map[string]string) error {
+    project := config[projectKey]

-func NewBlockStorageAdapter(project string) (cloudprovider.BlockStorageAdapter, error) {
    if project == "" {
-        return nil, errors.New("missing project in gcp configuration in config file")
+        return errors.Errorf("missing %s in gcp configuration", projectKey)
    }

    client, err := google.DefaultClient(oauth2.NoContext, compute.ComputeScope)
    if err != nil {
-        return nil, errors.WithStack(err)
+        return errors.WithStack(err)
    }

    gce, err := compute.New(client)
    if err != nil {
-        return nil, errors.WithStack(err)
+        return errors.WithStack(err)
    }

    // validate project
    res, err := gce.Projects.Get(project).Do()
    if err != nil {
-        return nil, errors.WithStack(err)
+        return errors.WithStack(err)
    }

    if res == nil {
-        return nil, errors.Errorf("error getting project %q", project)
+        return errors.Errorf("error getting project %q", project)
    }

-    return &blockStorageAdapter{
-        gce:     gce,
-        project: project,
-    }, nil
+    b.gce = gce
+    b.project = project
+
+    return nil
}

-func (op *blockStorageAdapter) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (volumeID string, err error) {
-    res, err := op.gce.Snapshots.Get(op.project, snapshotID).Do()
+func (b *blockStore) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (volumeID string, err error) {
+    res, err := b.gce.Snapshots.Get(b.project, snapshotID).Do()
    if err != nil {
        return "", errors.WithStack(err)
    }
@@ -81,15 +88,15 @@ func (op *blockStorageAdapter) CreateVolumeFromSnapshot(snapshotID, volumeType,
        Type: volumeType,
    }

-    if _, err = op.gce.Disks.Insert(op.project, volumeAZ, disk).Do(); err != nil {
+    if _, err = b.gce.Disks.Insert(b.project, volumeAZ, disk).Do(); err != nil {
        return "", errors.WithStack(err)
    }

    return disk.Name, nil
}

-func (op *blockStorageAdapter) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) {
-    res, err := op.gce.Disks.Get(op.project, volumeAZ, volumeID).Do()
+func (b *blockStore) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) {
+    res, err := b.gce.Disks.Get(b.project, volumeAZ, volumeID).Do()
    if err != nil {
        return "", nil, errors.WithStack(err)
    }
@@ -97,8 +104,8 @@ func (op *blockStorageAdapter) GetVolumeInfo(volumeID, volumeAZ string) (string,
    return res.Type, nil, nil
}

-func (op *blockStorageAdapter) IsVolumeReady(volumeID, volumeAZ string) (ready bool, err error) {
-    disk, err := op.gce.Disks.Get(op.project, volumeAZ, volumeID).Do()
+func (b *blockStore) IsVolumeReady(volumeID, volumeAZ string) (ready bool, err error) {
+    disk, err := b.gce.Disks.Get(b.project, volumeAZ, volumeID).Do()
    if err != nil {
        return false, errors.WithStack(err)
    }
@@ -107,34 +114,7 @@ func (op *blockStorageAdapter) IsVolumeReady(volumeID, volumeAZ string) (ready b
    return disk.Status == "READY", nil
}

-func (op *blockStorageAdapter) ListSnapshots(tagFilters map[string]string) ([]string, error) {
-    useParentheses := len(tagFilters) > 1
-    subFilters := make([]string, 0, len(tagFilters))
-
-    for k, v := range tagFilters {
-        fs := k + " eq " + v
-        if useParentheses {
-            fs = "(" + fs + ")"
-        }
-        subFilters = append(subFilters, fs)
-    }
-
-    filter := strings.Join(subFilters, " ")
-
-    res, err := op.gce.Snapshots.List(op.project).Filter(filter).Do()
-    if err != nil {
-        return nil, errors.WithStack(err)
-    }
-
-    ret := make([]string, 0, len(res.Items))
-    for _, snap := range res.Items {
-        ret = append(ret, snap.Name)
-    }
-
-    return ret, nil
-}
-
-func (op *blockStorageAdapter) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (string, error) {
+func (b *blockStore) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (string, error) {
    // snapshot names must adhere to RFC1035 and be 1-63 characters
    // long
    var snapshotName string
@@ -150,7 +130,7 @@ func (op *blockStorageAdapter) CreateSnapshot(volumeID, volumeAZ string, tags ma
        Name: snapshotName,
    }

-    _, err := op.gce.Disks.CreateSnapshot(op.project, volumeAZ, volumeID, &gceSnap).Do()
+    _, err := b.gce.Disks.CreateSnapshot(b.project, volumeAZ, volumeID, &gceSnap).Do()
    if err != nil {
        return "", errors.WithStack(err)
    }
@@ -158,7 +138,7 @@ func (op *blockStorageAdapter) CreateSnapshot(volumeID, volumeAZ string, tags ma
    // the snapshot is not immediately available after creation for putting labels
    // on it. poll for a period of time.
    if pollErr := wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
-        if res, err := op.gce.Snapshots.Get(op.project, gceSnap.Name).Do(); err == nil {
+        if res, err := b.gce.Snapshots.Get(b.project, gceSnap.Name).Do(); err == nil {
            gceSnap = *res
            return true, nil
        }
@@ -172,7 +152,7 @@ func (op *blockStorageAdapter) CreateSnapshot(volumeID, volumeAZ string, tags ma
        LabelFingerprint: gceSnap.LabelFingerprint,
    }

-    _, err = op.gce.Snapshots.SetLabels(op.project, gceSnap.Name, labels).Do()
+    _, err = b.gce.Snapshots.SetLabels(b.project, gceSnap.Name, labels).Do()
    if err != nil {
        return "", errors.WithStack(err)
    }
@@ -180,8 +160,32 @@ func (op *blockStorageAdapter) CreateSnapshot(volumeID, volumeAZ string, tags ma
    return gceSnap.Name, nil
}

-func (op *blockStorageAdapter) DeleteSnapshot(snapshotID string) error {
-    _, err := op.gce.Snapshots.Delete(op.project, snapshotID).Do()
+func (b *blockStore) DeleteSnapshot(snapshotID string) error {
+    _, err := b.gce.Snapshots.Delete(b.project, snapshotID).Do()

    return errors.WithStack(err)
}

+func (b *blockStore) GetVolumeID(pv runtime.Unstructured) (string, error) {
+    if !collections.Exists(pv.UnstructuredContent(), "spec.gcePersistentDisk") {
+        return "", nil
+    }
+
+    volumeID, err := collections.GetString(pv.UnstructuredContent(), "spec.gcePersistentDisk.pdName")
+    if err != nil {
+        return "", err
+    }
+
+    return volumeID, nil
+}
+
+func (b *blockStore) SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error) {
+    gce, err := collections.GetMap(pv.UnstructuredContent(), "spec.gcePersistentDisk")
+    if err != nil {
+        return nil, err
+    }
+
+    gce["pdName"] = volumeID
+
+    return pv, nil
+}
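With the constructor/Init split above, server code can construct the GCP block store without any configuration and defer all validation to `Init`. A minimal sketch of that wiring (the `project` config key comes from the diff; the project value and the fact that this runs with default GCP credentials are illustrative assumptions):

```go
package main

import (
	"log"

	"github.com/heptio/ark/pkg/cloudprovider/gcp"
)

func main() {
	// NewBlockStore returns an empty store; nothing talks to GCP yet.
	store := gcp.NewBlockStore()

	// Init validates the config ("project" is required) and builds the
	// authenticated compute client, mirroring the old NewBlockStorageAdapter.
	if err := store.Init(map[string]string{"project": "my-gcp-project"}); err != nil {
		log.Fatal(err)
	}
}
```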
pkg/cloudprovider/gcp/block_store_test.go (new file, 74 lines)
@@ -0,0 +1,74 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gcp

import (
    "testing"

    "github.com/heptio/ark/pkg/util/collections"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func TestGetVolumeID(t *testing.T) {
    b := &blockStore{}

    pv := &unstructured.Unstructured{}

    // missing spec.gcePersistentDisk -> no error
    volumeID, err := b.GetVolumeID(pv)
    require.NoError(t, err)
    assert.Equal(t, "", volumeID)

    // missing spec.gcePersistentDisk.pdName -> error
    gce := map[string]interface{}{}
    pv.Object["spec"] = map[string]interface{}{
        "gcePersistentDisk": gce,
    }
    volumeID, err = b.GetVolumeID(pv)
    assert.Error(t, err)
    assert.Equal(t, "", volumeID)

    // valid
    gce["pdName"] = "abc123"
    volumeID, err = b.GetVolumeID(pv)
    assert.NoError(t, err)
    assert.Equal(t, "abc123", volumeID)
}

func TestSetVolumeID(t *testing.T) {
    b := &blockStore{}

    pv := &unstructured.Unstructured{}

    // missing spec.gcePersistentDisk -> error
    updatedPV, err := b.SetVolumeID(pv, "abc123")
    require.Error(t, err)

    // happy path
    gce := map[string]interface{}{}
    pv.Object["spec"] = map[string]interface{}{
        "gcePersistentDisk": gce,
    }
    updatedPV, err = b.SetVolumeID(pv, "123abc")
    require.NoError(t, err)
    actual, err := collections.GetString(updatedPV.UnstructuredContent(), "spec.gcePersistentDisk.pdName")
    require.NoError(t, err)
    assert.Equal(t, "123abc", actual)
}
@@ -1,127 +0,0 @@
/*
Copyright 2017 Heptio Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gcp

import (
    "io"
    "strings"
    "time"

    "github.com/pkg/errors"
    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
    // TODO switch to using newstorage
    newstorage "cloud.google.com/go/storage"
    storage "google.golang.org/api/storage/v1"

    "github.com/heptio/ark/pkg/cloudprovider"
)

type objectStorageAdapter struct {
    gcs            *storage.Service
    googleAccessID string
    privateKey     []byte
}

var _ cloudprovider.ObjectStorageAdapter = &objectStorageAdapter{}

func NewObjectStorageAdapter(googleAccessID string, privateKey []byte) (cloudprovider.ObjectStorageAdapter, error) {
    client, err := google.DefaultClient(oauth2.NoContext, storage.DevstorageReadWriteScope)
    if err != nil {
        return nil, errors.WithStack(err)
    }

    gcs, err := storage.New(client)
    if err != nil {
        return nil, errors.WithStack(err)
    }

    return &objectStorageAdapter{
        gcs:            gcs,
        googleAccessID: googleAccessID,
        privateKey:     privateKey,
    }, nil
}

func (op *objectStorageAdapter) PutObject(bucket string, key string, body io.ReadSeeker) error {
    obj := &storage.Object{
        Name: key,
    }

    _, err := op.gcs.Objects.Insert(bucket, obj).Media(body).Do()

    return errors.WithStack(err)
}

func (op *objectStorageAdapter) GetObject(bucket string, key string) (io.ReadCloser, error) {
    res, err := op.gcs.Objects.Get(bucket, key).Download()
    if err != nil {
        return nil, errors.WithStack(err)
    }

    return res.Body, nil
}

func (op *objectStorageAdapter) ListCommonPrefixes(bucket string, delimiter string) ([]string, error) {
    res, err := op.gcs.Objects.List(bucket).Delimiter(delimiter).Do()
    if err != nil {
        return nil, errors.WithStack(err)
    }

    // GCP returns prefixes inclusive of the last delimiter. We need to strip
    // it.
    ret := make([]string, 0, len(res.Prefixes))
    for _, prefix := range res.Prefixes {
        ret = append(ret, prefix[0:strings.LastIndex(prefix, delimiter)])
    }

    return ret, nil
}

func (op *objectStorageAdapter) ListObjects(bucket, prefix string) ([]string, error) {
    res, err := op.gcs.Objects.List(bucket).Prefix(prefix).Do()
    if err != nil {
        return nil, errors.WithStack(err)
    }

    ret := make([]string, 0, len(res.Items))
    for _, item := range res.Items {
        ret = append(ret, item.Name)
    }

    return ret, nil
}

func (op *objectStorageAdapter) DeleteObject(bucket string, key string) error {
    return errors.Wrapf(op.gcs.Objects.Delete(bucket, key).Do(), "error deleting object %s", key)
}

func (op *objectStorageAdapter) CreateSignedURL(bucket, key string, ttl time.Duration) (string, error) {
    if op.googleAccessID == "" {
        return "", errors.New("unable to create a pre-signed URL - make sure GOOGLE_APPLICATION_CREDENTIALS points to a valid GCE service account file (missing email address)")
    }
    if len(op.privateKey) == 0 {
        return "", errors.New("unable to create a pre-signed URL - make sure GOOGLE_APPLICATION_CREDENTIALS points to a valid GCE service account file (missing private key)")
    }

    return newstorage.SignedURL(bucket, key, &newstorage.SignedURLOptions{
        GoogleAccessID: op.googleAccessID,
        PrivateKey:     op.privateKey,
        Method:         "GET",
        Expires:        time.Now().Add(ttl),
    })
}
pkg/cloudprovider/gcp/object_store.go (new file, 147 lines)
@@ -0,0 +1,147 @@
/*
Copyright 2017 Heptio Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gcp

import (
    "io"
    "io/ioutil"
    "os"
    "strings"
    "time"

    "github.com/pkg/errors"
    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
    // TODO switch to using newstorage
    newstorage "cloud.google.com/go/storage"
    storage "google.golang.org/api/storage/v1"

    "github.com/heptio/ark/pkg/cloudprovider"
)

const credentialsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS"

type objectStore struct {
    gcs            *storage.Service
    googleAccessID string
    privateKey     []byte
}

func NewObjectStore() cloudprovider.ObjectStore {
    return &objectStore{}
}

func (o *objectStore) Init(config map[string]string) error {
    credentialsFile := os.Getenv(credentialsEnvVar)
    if credentialsFile == "" {
        return errors.Errorf("%s is undefined", credentialsEnvVar)
    }

    // Get the email and private key from the credentials file so we can pre-sign download URLs
    creds, err := ioutil.ReadFile(credentialsFile)
    if err != nil {
        return errors.WithStack(err)
    }
    jwtConfig, err := google.JWTConfigFromJSON(creds)
    if err != nil {
        return errors.WithStack(err)
    }
    if jwtConfig.Email == "" {
        return errors.Errorf("credentials file pointed to by %s does not contain an email", credentialsEnvVar)
    }
    if len(jwtConfig.PrivateKey) == 0 {
        return errors.Errorf("credentials file pointed to by %s does not contain a private key", credentialsEnvVar)
    }

    client, err := google.DefaultClient(oauth2.NoContext, storage.DevstorageReadWriteScope)
    if err != nil {
        return errors.WithStack(err)
    }

    gcs, err := storage.New(client)
    if err != nil {
        return errors.WithStack(err)
    }

    o.gcs = gcs
    o.googleAccessID = jwtConfig.Email
    o.privateKey = jwtConfig.PrivateKey

    return nil
}

func (o *objectStore) PutObject(bucket string, key string, body io.Reader) error {
    obj := &storage.Object{
        Name: key,
    }

    _, err := o.gcs.Objects.Insert(bucket, obj).Media(body).Do()

    return errors.WithStack(err)
}

func (o *objectStore) GetObject(bucket string, key string) (io.ReadCloser, error) {
    res, err := o.gcs.Objects.Get(bucket, key).Download()
    if err != nil {
        return nil, errors.WithStack(err)
    }

    return res.Body, nil
}

func (o *objectStore) ListCommonPrefixes(bucket string, delimiter string) ([]string, error) {
    res, err := o.gcs.Objects.List(bucket).Delimiter(delimiter).Do()
    if err != nil {
        return nil, errors.WithStack(err)
    }

    // GCP returns prefixes inclusive of the last delimiter. We need to strip
    // it.
    ret := make([]string, 0, len(res.Prefixes))
    for _, prefix := range res.Prefixes {
        ret = append(ret, prefix[0:strings.LastIndex(prefix, delimiter)])
    }

    return ret, nil
}

func (o *objectStore) ListObjects(bucket, prefix string) ([]string, error) {
    res, err := o.gcs.Objects.List(bucket).Prefix(prefix).Do()
    if err != nil {
        return nil, errors.WithStack(err)
    }

    ret := make([]string, 0, len(res.Items))
    for _, item := range res.Items {
        ret = append(ret, item.Name)
    }

    return ret, nil
}

func (o *objectStore) DeleteObject(bucket string, key string) error {
    return errors.Wrapf(o.gcs.Objects.Delete(bucket, key).Do(), "error deleting object %s", key)
}

func (o *objectStore) CreateSignedURL(bucket, key string, ttl time.Duration) (string, error) {
    return newstorage.SignedURL(bucket, key, &newstorage.SignedURLOptions{
        GoogleAccessID: o.googleAccessID,
        PrivateKey:     o.privateKey,
        Method:         "GET",
        Expires:        time.Now().Add(ttl),
    })
}
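The `Init` above derives the URL-signing identity from the same service-account file the Google SDK already uses. A standalone sketch of just that extraction step — reading `GOOGLE_APPLICATION_CREDENTIALS` and calling `google.JWTConfigFromJSON` is taken from the diff; printing the fields is purely illustrative:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"golang.org/x/oauth2/google"
)

func main() {
	creds, err := ioutil.ReadFile(os.Getenv("GOOGLE_APPLICATION_CREDENTIALS"))
	if err != nil {
		log.Fatal(err)
	}

	// JWTConfigFromJSON parses the service-account JSON; Init relies on its
	// Email and PrivateKey fields to pre-sign download URLs.
	jwtConfig, err := google.JWTConfigFromJSON(creds)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signing as:", jwtConfig.Email)
	fmt.Println("have private key:", len(jwtConfig.PrivateKey) > 0)
}
```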
@@ -20,16 +20,12 @@ import (
    "time"

    "github.com/pkg/errors"
+    "k8s.io/apimachinery/pkg/runtime"
)

// SnapshotService exposes Ark-specific operations for snapshotting and restoring block
// volumes.
type SnapshotService interface {
-    // GetAllSnapshots returns a slice of all snapshots found in the cloud API that
-    // are tagged with Ark metadata. Returns an error if a problem is encountered accessing
-    // the cloud API.
-    GetAllSnapshots() ([]string, error)
-
    // CreateSnapshot triggers a snapshot for the specified cloud volume and tags it with metadata.
    // it returns the cloud snapshot ID, or an error if a problem is encountered triggering the snapshot via
    // the cloud API.
@@ -46,6 +42,12 @@ type SnapshotService interface {

    // GetVolumeInfo gets the type and IOPS (if applicable) from the cloud API.
    GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error)
+
+    // GetVolumeID returns the cloud provider specific identifier for the PersistentVolume.
+    GetVolumeID(pv runtime.Unstructured) (string, error)
+
+    // SetVolumeID sets the cloud provider specific identifier for the PersistentVolume.
+    SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error)
}

const (
@@ -56,20 +58,20 @@ const (
)

type snapshotService struct {
-    blockStorage BlockStorageAdapter
+    blockStore BlockStore
}

var _ SnapshotService = &snapshotService{}

-// NewSnapshotService creates a snapshot service using the provided block storage adapter
-func NewSnapshotService(blockStorage BlockStorageAdapter) SnapshotService {
+// NewSnapshotService creates a snapshot service using the provided block store
+func NewSnapshotService(blockStore BlockStore) SnapshotService {
    return &snapshotService{
-        blockStorage: blockStorage,
+        blockStore: blockStore,
    }
}

func (sr *snapshotService) CreateVolumeFromSnapshot(snapshotID string, volumeType string, volumeAZ string, iops *int64) (string, error) {
-    volumeID, err := sr.blockStorage.CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ, iops)
+    volumeID, err := sr.blockStore.CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ, iops)
    if err != nil {
        return "", err
    }
@@ -85,38 +87,33 @@ func (sr *snapshotService) CreateVolumeFromSnapshot(snapshotID string, volumeTyp
        case <-timeout.C:
            return "", errors.Errorf("timeout reached waiting for volume %v to be ready", volumeID)
        case <-ticker.C:
-            if ready, err := sr.blockStorage.IsVolumeReady(volumeID, volumeAZ); err == nil && ready {
+            if ready, err := sr.blockStore.IsVolumeReady(volumeID, volumeAZ); err == nil && ready {
                return volumeID, nil
            }
        }
    }
}

-func (sr *snapshotService) GetAllSnapshots() ([]string, error) {
-    tags := map[string]string{
-        snapshotTagKey: snapshotTagVal,
-    }
-
-    res, err := sr.blockStorage.ListSnapshots(tags)
-    if err != nil {
-        return nil, err
-    }
-
-    return res, nil
-}
-
func (sr *snapshotService) CreateSnapshot(volumeID, volumeAZ string) (string, error) {
    tags := map[string]string{
        snapshotTagKey: snapshotTagVal,
    }

-    return sr.blockStorage.CreateSnapshot(volumeID, volumeAZ, tags)
+    return sr.blockStore.CreateSnapshot(volumeID, volumeAZ, tags)
}

func (sr *snapshotService) DeleteSnapshot(snapshotID string) error {
-    return sr.blockStorage.DeleteSnapshot(snapshotID)
+    return sr.blockStore.DeleteSnapshot(snapshotID)
}

func (sr *snapshotService) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) {
-    return sr.blockStorage.GetVolumeInfo(volumeID, volumeAZ)
+    return sr.blockStore.GetVolumeInfo(volumeID, volumeAZ)
}
+
+func (sr *snapshotService) GetVolumeID(pv runtime.Unstructured) (string, error) {
+    return sr.blockStore.GetVolumeID(pv)
+}
+
+func (sr *snapshotService) SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error) {
+    return sr.blockStore.SetVolumeID(pv, volumeID)
+}
@@ -19,28 +19,37 @@ package cloudprovider
import (
    "io"
    "time"
+
+    "k8s.io/apimachinery/pkg/runtime"
)

-// ObjectStorageAdapter exposes basic object-storage operations required
+// ObjectStore exposes basic object-storage operations required
// by Ark.
-type ObjectStorageAdapter interface {
+type ObjectStore interface {
+    // Init prepares the ObjectStore for usage using the provided map of
+    // configuration key-value pairs. It returns an error if the ObjectStore
+    // cannot be initialized from the provided config.
+    Init(config map[string]string) error
+
    // PutObject creates a new object using the data in body within the specified
    // object storage bucket with the given key.
-    PutObject(bucket string, key string, body io.ReadSeeker) error
+    PutObject(bucket string, key string, body io.Reader) error

    // GetObject retrieves the object with the given key from the specified
    // bucket in object storage.
    GetObject(bucket string, key string) (io.ReadCloser, error)

    // ListCommonPrefixes gets a list of all object key prefixes that come
-    // before the provided delimiter (this is often used to simulate a directory
-    // hierarchy in object storage).
+    // before the provided delimiter. For example, if the bucket contains
+    // the keys "foo-1/bar", "foo-1/baz", and "foo-2/baz", and the delimiter
+    // is "/", this will return the slice {"foo-1", "foo-2"}.
    ListCommonPrefixes(bucket string, delimiter string) ([]string, error)

-    // ListObjects gets a list of all objects in bucket that have the same prefix.
+    // ListObjects gets a list of all keys in the specified bucket
+    // that have the given prefix.
    ListObjects(bucket, prefix string) ([]string, error)

-    // DeleteObject removes object with the specified key from the given
+    // DeleteObject removes the object with the specified key from the given
    // bucket.
    DeleteObject(bucket string, key string) error

@@ -48,23 +57,32 @@ type ObjectStorageAdapter interface {
    CreateSignedURL(bucket, key string, ttl time.Duration) (string, error)
}

-// BlockStorageAdapter exposes basic block-storage operations required
+// BlockStore exposes basic block-storage operations required
// by Ark.
-type BlockStorageAdapter interface {
-    // CreateVolumeFromSnapshot creates a new block volume, initialized from the provided snapshot,
+type BlockStore interface {
+    // Init prepares the BlockStore for usage using the provided map of
+    // configuration key-value pairs. It returns an error if the BlockStore
+    // cannot be initialized from the provided config.
+    Init(config map[string]string) error
+
+    // CreateVolumeFromSnapshot creates a new block volume in the specified
+    // availability zone, initialized from the provided snapshot,
    // and with the specified type and IOPS (if using provisioned IOPS).
    CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (volumeID string, err error)

-    // GetVolumeInfo returns the type and IOPS (if using provisioned IOPS) for a specified block
-    // volume.
+    // GetVolumeID returns the cloud provider specific identifier for the PersistentVolume.
+    GetVolumeID(pv runtime.Unstructured) (string, error)
+
+    // SetVolumeID sets the cloud provider specific identifier for the PersistentVolume.
+    SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error)
+
+    // GetVolumeInfo returns the type and IOPS (if using provisioned IOPS) for
+    // the specified block volume in the given availability zone.
    GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error)

    // IsVolumeReady returns whether the specified volume is ready to be used.
    IsVolumeReady(volumeID, volumeAZ string) (ready bool, err error)

-    // ListSnapshots returns a list of all snapshots matching the specified set of tag key/values.
-    ListSnapshots(tagFilters map[string]string) ([]string, error)
-
    // CreateSnapshot creates a snapshot of the specified block volume, and applies the provided
    // set of tags to the snapshot.
    CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (snapshotID string, err error)
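To make the new contract concrete, here is a minimal sketch (not part of the diff) of an in-memory store with the same method set as `ObjectStore`; the map-backed storage, the stubbed `CreateSignedURL`, and all names are illustrative assumptions, the kind of thing a plugin test double might use:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"time"
)

// memObjectStore is a toy object store backed by a map: bucket -> key -> contents.
type memObjectStore struct {
	data map[string]map[string][]byte
}

func (m *memObjectStore) Init(config map[string]string) error {
	m.data = make(map[string]map[string][]byte)
	return nil
}

func (m *memObjectStore) PutObject(bucket, key string, body io.Reader) error {
	b, err := ioutil.ReadAll(body) // io.Reader is enough now; no seeking required
	if err != nil {
		return err
	}
	if m.data[bucket] == nil {
		m.data[bucket] = make(map[string][]byte)
	}
	m.data[bucket][key] = b
	return nil
}

func (m *memObjectStore) GetObject(bucket, key string) (io.ReadCloser, error) {
	b, ok := m.data[bucket][key]
	if !ok {
		return nil, fmt.Errorf("no object %s/%s", bucket, key)
	}
	return ioutil.NopCloser(bytes.NewReader(b)), nil
}

func (m *memObjectStore) ListCommonPrefixes(bucket, delimiter string) ([]string, error) {
	seen := map[string]bool{}
	var prefixes []string
	for key := range m.data[bucket] {
		// everything before the first delimiter is a "directory" prefix
		if i := strings.Index(key, delimiter); i >= 0 && !seen[key[:i]] {
			seen[key[:i]] = true
			prefixes = append(prefixes, key[:i])
		}
	}
	return prefixes, nil
}

func (m *memObjectStore) ListObjects(bucket, prefix string) ([]string, error) {
	var keys []string
	for key := range m.data[bucket] {
		if strings.HasPrefix(key, prefix) {
			keys = append(keys, key)
		}
	}
	return keys, nil
}

func (m *memObjectStore) DeleteObject(bucket, key string) error {
	delete(m.data[bucket], key)
	return nil
}

func (m *memObjectStore) CreateSignedURL(bucket, key string, ttl time.Duration) (string, error) {
	return "", fmt.Errorf("signed URLs are not supported by the in-memory store")
}

func main() {
	store := &memObjectStore{}
	_ = store.Init(nil)
	_ = store.PutObject("bucket", "backup-1/ark-backup.json", strings.NewReader("{}"))
	prefixes, _ := store.ListCommonPrefixes("bucket", "/")
	fmt.Println(prefixes) // [backup-1]
}
```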
@@ -24,10 +24,13 @@ import (
    "github.com/heptio/ark/pkg/client"
    "github.com/heptio/ark/pkg/cmd/cli/backup"
    "github.com/heptio/ark/pkg/cmd/cli/create"
+    "github.com/heptio/ark/pkg/cmd/cli/describe"
    "github.com/heptio/ark/pkg/cmd/cli/get"
+    "github.com/heptio/ark/pkg/cmd/cli/plugin"
    "github.com/heptio/ark/pkg/cmd/cli/restore"
    "github.com/heptio/ark/pkg/cmd/cli/schedule"
    "github.com/heptio/ark/pkg/cmd/server"
+    runplugin "github.com/heptio/ark/pkg/cmd/server/plugin"
    "github.com/heptio/ark/pkg/cmd/version"
)

@@ -54,7 +57,10 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.`
        server.NewCommand(),
        version.NewCommand(),
        get.NewCommand(f),
+        describe.NewCommand(f),
        create.NewCommand(f),
+        runplugin.NewCommand(),
+        plugin.NewCommand(f),
    )

    // add the glog flags
@@ -33,11 +33,9 @@ func NewCommand(f client.Factory) *cobra.Command {
        NewCreateCommand(f, "create"),
        NewGetCommand(f, "get"),
        NewLogsCommand(f),
+        NewDescribeCommand(f, "describe"),
        NewDownloadCommand(f),

-        // Will implement describe later
-        // NewDescribeCommand(f),
-
        // If you delete a backup and it still exists in object storage, the backup sync controller will
        // recreate it. Until we have a good UX around this, we're disabling the delete command.
        // NewDeleteCommand(f),
@@ -68,7 +68,7 @@ type CreateOptions struct {

func NewCreateOptions() *CreateOptions {
    return &CreateOptions{
-        TTL:               24 * time.Hour,
+        TTL:               30 * 24 * time.Hour,
        IncludeNamespaces: flag.NewStringArray("*"),
        Labels:            flag.NewMap(),
        SnapshotVolumes:   flag.NewOptionalBool(nil),
@@ -17,18 +17,55 @@ limitations under the License.
package backup

import (
-    "github.com/spf13/cobra"
+    "fmt"
+
+    "github.com/spf13/cobra"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+    "github.com/heptio/ark/pkg/apis/ark/v1"
    "github.com/heptio/ark/pkg/client"
    "github.com/heptio/ark/pkg/cmd"
+    "github.com/heptio/ark/pkg/cmd/util/output"
)

-func NewDescribeCommand(f client.Factory) *cobra.Command {
+func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
+    var listOptions metav1.ListOptions
+
    c := &cobra.Command{
-        Use:   "describe",
-        Short: "Describe a backup",
+        Use:   use + " [NAME1] [NAME2] [NAME...]",
+        Short: "Describe backups",
        Run: func(c *cobra.Command, args []string) {
            arkClient, err := f.Client()
            cmd.CheckError(err)
+
+            var backups *v1.BackupList
+            if len(args) > 0 {
+                backups = new(v1.BackupList)
+                for _, name := range args {
+                    backup, err := arkClient.Ark().Backups(v1.DefaultNamespace).Get(name, metav1.GetOptions{})
+                    cmd.CheckError(err)
+                    backups.Items = append(backups.Items, *backup)
+                }
+            } else {
+                backups, err = arkClient.ArkV1().Backups(v1.DefaultNamespace).List(listOptions)
+                cmd.CheckError(err)
+            }
+
+            first := true
+            for _, backup := range backups.Items {
+                s := output.DescribeBackup(&backup)
+                if first {
+                    first = false
+                    fmt.Print(s)
+                } else {
+                    fmt.Printf("\n\n%s", s)
+                }
+            }
            cmd.CheckError(err)
        },
    }

+    c.Flags().StringVarP(&listOptions.LabelSelector, "selector", "l", listOptions.LabelSelector, "only show items matching this label selector")
+
    return c
}
@@ -107,7 +107,10 @@ func (o *DownloadOptions) Run(c *cobra.Command, f client.Factory) error {
    defer backupDest.Close()

    err = downloadrequest.Stream(arkClient.ArkV1(), o.Name, v1.DownloadTargetKindBackupContents, backupDest, o.Timeout)
-    cmd.CheckError(err)
+    if err != nil {
+        os.Remove(o.Output)
+        cmd.CheckError(err)
+    }

    fmt.Printf("Backup %s has been successfully downloaded to %s\n", o.Name, backupDest.Name())
    return nil
@@ -49,7 +49,7 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command {
                backups.Items = append(backups.Items, *backup)
            }
        } else {
-            backups, err = arkClient.ArkV1().Backups(api.DefaultNamespace).List(metav1.ListOptions{})
+            backups, err = arkClient.ArkV1().Backups(api.DefaultNamespace).List(listOptions)
            cmd.CheckError(err)
        }

pkg/cmd/cli/describe/describe.go (new file, 51 lines)
@@ -0,0 +1,51 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package describe

import (
    "github.com/spf13/cobra"

    "github.com/heptio/ark/pkg/client"
    "github.com/heptio/ark/pkg/cmd/cli/backup"
    "github.com/heptio/ark/pkg/cmd/cli/restore"
    "github.com/heptio/ark/pkg/cmd/cli/schedule"
)

func NewCommand(f client.Factory) *cobra.Command {
    c := &cobra.Command{
        Use:   "describe",
        Short: "Describe ark resources",
        Long:  "Describe ark resources",
    }

    backupCommand := backup.NewDescribeCommand(f, "backups")
    backupCommand.Aliases = []string{"backup"}

    scheduleCommand := schedule.NewDescribeCommand(f, "schedules")
    scheduleCommand.Aliases = []string{"schedule"}

    restoreCommand := restore.NewDescribeCommand(f, "restores")
    restoreCommand.Aliases = []string{"restore"}

    c.AddCommand(
        backupCommand,
        scheduleCommand,
        restoreCommand,
    )

    return c
}
pkg/cmd/cli/plugin/add.go (new file, 162 lines)
@@ -0,0 +1,162 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package plugin

import (
    "encoding/json"
    "fmt"
    "strings"

    "github.com/pkg/errors"
    "github.com/spf13/cobra"

    "k8s.io/api/apps/v1beta1"
    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/strategicpatch"

    ark "github.com/heptio/ark/pkg/apis/ark/v1"
    "github.com/heptio/ark/pkg/client"
    "github.com/heptio/ark/pkg/cmd"
    "github.com/heptio/ark/pkg/cmd/util/flag"
)

const (
    pluginsVolumeName = "plugins"
    arkDeployment     = "ark"
    arkContainer      = "ark"
)

func NewAddCommand(f client.Factory) *cobra.Command {
    var (
        imagePullPolicies   = []string{string(v1.PullAlways), string(v1.PullIfNotPresent), string(v1.PullNever)}
        imagePullPolicyFlag = flag.NewEnum(string(v1.PullIfNotPresent), imagePullPolicies...)
    )

    c := &cobra.Command{
        Use:   "add IMAGE",
        Short: "Add a plugin",
        Run: func(c *cobra.Command, args []string) {
            if len(args) != 1 {
                cmd.CheckError(errors.New("you must specify only one argument, the plugin container image"))
            }

            kubeClient, err := f.KubeClient()
            if err != nil {
                cmd.CheckError(err)
            }

            arkDeploy, err := kubeClient.AppsV1beta1().Deployments(ark.DefaultNamespace).Get(arkDeployment, metav1.GetOptions{})
            if err != nil {
                cmd.CheckError(err)
            }

            original, err := json.Marshal(arkDeploy)
            cmd.CheckError(err)

            // ensure the plugins volume & mount exist
            volumeExists := false
            for _, volume := range arkDeploy.Spec.Template.Spec.Volumes {
                if volume.Name == pluginsVolumeName {
                    volumeExists = true
                    break
                }
            }

            if !volumeExists {
                volume := v1.Volume{
                    Name: pluginsVolumeName,
                    VolumeSource: v1.VolumeSource{
                        EmptyDir: &v1.EmptyDirVolumeSource{},
                    },
                }

                volumeMount := v1.VolumeMount{
                    Name:      pluginsVolumeName,
                    MountPath: "/plugins",
                }

                arkDeploy.Spec.Template.Spec.Volumes = append(arkDeploy.Spec.Template.Spec.Volumes, volume)

                containers := arkDeploy.Spec.Template.Spec.Containers
                containerIndex := -1
                for x, container := range containers {
                    if container.Name == arkContainer {
                        containerIndex = x
                        break
                    }
                }

                if containerIndex < 0 {
                    cmd.CheckError(errors.New("ark container not found in ark deployment"))
                }

                containers[containerIndex].VolumeMounts = append(containers[containerIndex].VolumeMounts, volumeMount)
            }

            // add the plugin as an init container
            plugin := v1.Container{
                Name:            getName(args[0]),
                Image:           args[0],
                ImagePullPolicy: v1.PullPolicy(imagePullPolicyFlag.String()),
                VolumeMounts: []v1.VolumeMount{
                    {
                        Name:      pluginsVolumeName,
                        MountPath: "/target",
                    },
                },
            }

            arkDeploy.Spec.Template.Spec.InitContainers = append(arkDeploy.Spec.Template.Spec.InitContainers, plugin)

            // create & apply the patch
            updated, err := json.Marshal(arkDeploy)
            cmd.CheckError(err)

            patchBytes, err := strategicpatch.CreateTwoWayMergePatch(original, updated, v1beta1.Deployment{})
            cmd.CheckError(err)

            _, err = kubeClient.AppsV1beta1().Deployments(ark.DefaultNamespace).Patch(arkDeploy.Name, types.StrategicMergePatchType, patchBytes)
            cmd.CheckError(err)
        },
    }

    c.Flags().Var(imagePullPolicyFlag, "image-pull-policy", fmt.Sprintf("the imagePullPolicy for the plugin container. Valid values are %s.", strings.Join(imagePullPolicies, ", ")))

    return c
}

// getName returns the 'name' component of a docker
// image (i.e. everything after the last '/' and before
// any subsequent ':')
func getName(image string) string {
    slashIndex := strings.LastIndex(image, "/")
    colonIndex := strings.LastIndex(image, ":")

    start := 0
    if slashIndex > 0 {
        start = slashIndex + 1
    }

    end := len(image)
    if colonIndex > slashIndex {
        end = colonIndex
    }

    return image[start:end]
}
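`add.go` (and `remove.go` below) modify the server deployment by diffing a before/after snapshot rather than issuing a full Update. A minimal sketch of that read-mutate-diff-patch pattern, using the same apimachinery helper the command uses; the container name and image here are illustrative only:

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/api/apps/v1beta1"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	var deploy v1beta1.Deployment

	// Snapshot the object before mutating it.
	original, _ := json.Marshal(deploy)

	// Mutate the in-memory copy, as `ark plugin add` does for init containers.
	deploy.Spec.Template.Spec.InitContainers = append(
		deploy.Spec.Template.Spec.InitContainers,
		v1.Container{Name: "my-plugin", Image: "example.com/my-plugin:latest"},
	)
	updated, _ := json.Marshal(deploy)

	// The resulting patch contains only the delta, suitable for
	// Patch(name, types.StrategicMergePatchType, patchBytes).
	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(original, updated, v1beta1.Deployment{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patchBytes))
}
```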
pkg/cmd/cli/plugin/add_test.go (new file, 47 lines)
@@ -0,0 +1,47 @@
package plugin

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestGetName(t *testing.T) {
    tests := []struct {
        name     string
        image    string
        expected string
    }{
        {
            name:     "image name with registry hostname and tag",
            image:    "gcr.io/my-repo/my-image:latest",
            expected: "my-image",
        },
        {
            name:     "image name with registry hostname, without tag",
            image:    "gcr.io/my-repo/my-image",
            expected: "my-image",
        },
        {
            name:     "image name without registry hostname, with tag",
            image:    "my-repo/my-image:latest",
            expected: "my-image",
        },
        {
            name:     "image name without registry hostname, without tag",
            image:    "my-repo/my-image",
            expected: "my-image",
        },
        {
            name:     "image name with registry hostname and port, and tag",
            image:    "mycustomregistry.io:8080/my-repo/my-image:latest",
            expected: "my-image",
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            assert.Equal(t, test.expected, getName(test.image))
        })
    }
}
@@ -1,7 +1,5 @@
-// +build windows
-
/*
-Copyright 2014 The Kubernetes Authors.
+Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,12 +14,25 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

-package util
+package plugin

import (
-    "errors"
+    "github.com/spf13/cobra"
+
+    "github.com/heptio/ark/pkg/client"
)

-func Umask(mask int) (int, error) {
-    return 0, errors.New("platform and architecture is not supported")
+func NewCommand(f client.Factory) *cobra.Command {
+    c := &cobra.Command{
+        Use:   "plugin",
+        Short: "Work with plugins",
+        Long:  "Work with plugins",
+    }
+
+    c.AddCommand(
+        NewAddCommand(f),
+        NewRemoveCommand(f),
+    )
+
+    return c
}
pkg/cmd/cli/plugin/remove.go (new file, 87 lines)
@@ -0,0 +1,87 @@
/*
Copyright 2017 the Heptio Ark contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package plugin

import (
    "encoding/json"

    "github.com/pkg/errors"
    "github.com/spf13/cobra"

    "k8s.io/api/apps/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/strategicpatch"

    ark "github.com/heptio/ark/pkg/apis/ark/v1"
    "github.com/heptio/ark/pkg/client"
    "github.com/heptio/ark/pkg/cmd"
)

func NewRemoveCommand(f client.Factory) *cobra.Command {
    c := &cobra.Command{
        Use:   "remove [NAME | IMAGE]",
        Short: "Remove a plugin",
        Run: func(c *cobra.Command, args []string) {
            if len(args) != 1 {
                cmd.CheckError(errors.New("you must specify only one argument, the plugin container's name or image"))
            }

            kubeClient, err := f.KubeClient()
            if err != nil {
                cmd.CheckError(err)
            }

            arkDeploy, err := kubeClient.AppsV1beta1().Deployments(ark.DefaultNamespace).Get(arkDeployment, metav1.GetOptions{})
            if err != nil {
                cmd.CheckError(err)
            }

            original, err := json.Marshal(arkDeploy)
            cmd.CheckError(err)

            var (
                initContainers = arkDeploy.Spec.Template.Spec.InitContainers
                index          = -1
            )

            for x, container := range initContainers {
                if container.Name == args[0] || container.Image == args[0] {
                    index = x
                    break
                }
            }

            if index == -1 {
                cmd.CheckError(errors.Errorf("init container %s not found in Ark server deployment", args[0]))
            }

            arkDeploy.Spec.Template.Spec.InitContainers = append(initContainers[0:index], initContainers[index+1:]...)

            updated, err := json.Marshal(arkDeploy)
            cmd.CheckError(err)

            patchBytes, err := strategicpatch.CreateTwoWayMergePatch(original, updated, v1beta1.Deployment{})
            cmd.CheckError(err)

            _, err = kubeClient.AppsV1beta1().Deployments(ark.DefaultNamespace).Patch(arkDeploy.Name, types.StrategicMergePatchType, patchBytes)
            cmd.CheckError(err)
        },
    }

    return c
}
@@ -17,18 +17,55 @@ limitations under the License.
package restore

import (
-    "github.com/spf13/cobra"
+    "fmt"
+
+    "github.com/spf13/cobra"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+    api "github.com/heptio/ark/pkg/apis/ark/v1"
    "github.com/heptio/ark/pkg/client"
    "github.com/heptio/ark/pkg/cmd"
+    "github.com/heptio/ark/pkg/cmd/util/output"
)

-func NewDescribeCommand(f client.Factory) *cobra.Command {
+func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
+    var listOptions metav1.ListOptions
+
    c := &cobra.Command{
-        Use:   "describe",
-        Short: "Describe a backup",
+        Use:   use + " [NAME1] [NAME2] [NAME...]",
+        Short: "Describe restores",
        Run: func(c *cobra.Command, args []string) {
            arkClient, err := f.Client()
            cmd.CheckError(err)
+
+            var restores *api.RestoreList
+            if len(args) > 0 {
+                restores = new(api.RestoreList)
+                for _, name := range args {
+                    restore, err := arkClient.Ark().Restores(api.DefaultNamespace).Get(name, metav1.GetOptions{})
+                    cmd.CheckError(err)
+                    restores.Items = append(restores.Items, *restore)
+                }
+            } else {
+                restores, err = arkClient.ArkV1().Restores(api.DefaultNamespace).List(listOptions)
+                cmd.CheckError(err)
+            }
+
+            first := true
+            for _, restore := range restores.Items {
+                s := output.DescribeRestore(&restore, arkClient)
+                if first {
+                    first = false
+                    fmt.Print(s)
+                } else {
+                    fmt.Printf("\n\n%s", s)
+                }
+            }
            cmd.CheckError(err)
        },
    }

+    c.Flags().StringVarP(&listOptions.LabelSelector, "selector", "l", listOptions.LabelSelector, "only show items matching this label selector")
+
    return c
}
Some files were not shown because too many files have changed in this diff.