Compare commits


202 Commits

Author SHA1 Message Date
Carlisia
659a852c8c Merge pull request #760 from skriss/v0.9.3-cherrypicks
V0.9.3 cherrypicks
2018-08-10 11:16:49 -07:00
Nolan Brubaker
ca8ae18020 Add v0.9.3 changelog entry
Signed-off-by: Nolan Brubaker <nolan@heptio.com>
2018-08-10 10:12:02 -07:00
Alex Lemaresquier
9f80f01c2a Initialize schedule Prometheus metrics to have them created beforehand (see https://prometheus.io/docs/practices/instrumentation/#avoid-missing-metrics)
Signed-off-by: Alex Lemaresquier <alex+github@lemaresquier.org>
2018-08-10 10:11:42 -07:00
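The fix above follows the linked Prometheus "avoid missing metrics" guidance: touch every labeled series once at registration time so it exists at zero before the first event. A minimal Go sketch of the pattern; the metric and schedule names are illustrative, not Ark's actual identifiers:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

var backupAttempts = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "ark_backup_attempt_total", // illustrative name
		Help: "Total number of backup attempts, per schedule.",
	},
	[]string{"schedule"},
)

// initScheduleMetrics touches the series for a schedule with Add(0) so it
// is exported at zero immediately, instead of being absent until the
// schedule's first backup runs.
func initScheduleMetrics(scheduleName string) {
	backupAttempts.WithLabelValues(scheduleName).Add(0)
}

func main() {
	prometheus.MustRegister(backupAttempts)
	initScheduleMetrics("daily-backup")
}
```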
Carlisia
0acd368291 Merge pull request #707 from skriss/release-v0.9.2
Release v0.9.2
2018-07-26 14:19:04 -07:00
Steve Kriss
0640cdab06 update changelog for v0.9.2
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-07-26 14:06:57 -07:00
Steve Kriss
d21ce48db1 fix bug preventing backup item action item updates from saving
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-07-26 14:02:01 -07:00
Carlisia
10a1fe2bfa Merge pull request #695 from skriss/release-0.9
cherry-pick commits for v0.9.1
2018-07-23 13:37:00 -07:00
Steve Kriss
07ce4988e3 update CHANGELOG.md for v0.9.1
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-07-23 12:13:27 -07:00
Steve Kriss
89e4611d1b cleanup service account action log statement
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-07-23 12:13:20 -07:00
Nolan Brubaker
7d6bebadc4 Add RBAC support for 1.7 clusters
Signed-off-by: Nolan Brubaker <nolan@heptio.com>
2018-07-23 12:01:40 -07:00
Steve Kriss
84f872e4d5 delete old deletion requests for backup when processing a new one
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-07-23 10:43:39 -07:00
Steve Kriss
b566a7c101 return nil error if 404 encountered when deleting snapshots
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-07-23 10:43:32 -07:00
Steve Kriss
b4f8d7cb5f fix tagging latest by using make's ifeq
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-07-20 12:06:29 -07:00
Steve Kriss
c23d9dd7c5 exit server if not all Ark CRDs exist at startup
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-07-20 11:57:16 -07:00
Steve Kriss
400e8a165b require namespace for backups/etc. to exist at server startup
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-07-20 11:53:25 -07:00
Andy Goldstein
efae9792db Merge pull request #642 from skriss/v0.9.0-changelog
update changelog for v0.9.0, v0.8.2, v0.8.3
2018-07-06 11:52:20 -04:00
Andy Goldstein
8327536b59 Merge pull request #643 from skriss/restic-docs-updates-v0.9.0
update restic docs for v0.9.0 GA release
2018-07-06 11:48:28 -04:00
Steve Kriss
a2c1fece33 update restic docs for v0.9.0 GA release
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-07-06 08:28:11 -07:00
Steve Kriss
8e7a2eed77 update changelog for v0.9.0, v0.8.2, v0.8.3
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-07-06 08:25:13 -07:00
Andy Goldstein
562a719382 Merge pull request #639 from Bradamant3/docs-reorg
Docs reorg
2018-07-06 11:18:41 -04:00
JENNIFER RONDEAU
cfdcd65f41 reorganize and edit docs for upcoming release
Signed-off-by: JENNIFER RONDEAU <jrondeau@heptio.com>
2018-07-06 10:53:01 -04:00
Andy Goldstein
2999f158db Merge pull request #550 from skriss/restic-describers
Add pod volume backups/restores to ark backup/restore describe
2018-07-05 16:27:36 -04:00
Steve Kriss
1e08e81537 Merge pull request #645 from ncdc/fix-restic-test
Fix flakey restic tests
2018-07-05 13:27:22 -07:00
Andy Goldstein
8dd9cded1a Fix testing arbitrary map key order
Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>
2018-07-05 16:22:46 -04:00
Steve Kriss
42f2891485 add pod volume backups/restores to ark backup/restore describe output
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-07-05 13:13:40 -07:00
Andy Goldstein
9db5e36b54 Fix test sorting function
Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>
2018-07-05 16:08:05 -04:00
Andy Goldstein
a70456f5ee Merge pull request #628 from skriss/restic-units
pkg/restic unit tests
2018-07-05 15:01:04 -04:00
JENNIFER RONDEAU
3646fcce46 add docs site build to gitignore
Signed-off-by: JENNIFER RONDEAU <jrondeau@heptio.com>
2018-07-05 14:57:15 -04:00
Steve Kriss
c18decc89b pkg/restic unit tests
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-07-05 11:44:17 -07:00
Andy Goldstein
5ce92adff0 Merge pull request #636 from skriss/restic-set-hostname
set --hostname flag for restic backups
2018-07-03 16:49:15 -04:00
Steve Kriss
547625c333 set --hostname flag for restic backups
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-07-02 10:52:32 -07:00
Steve Kriss
32907931e1 Merge pull request #633 from hamidzr/master
fixed minor typos
2018-06-29 14:28:56 -07:00
Hamid Zare
244994d316 fixed typos/capitalization
Signed-off-by: Hamid Zare <dellydela@gmail.com>
2018-06-29 16:23:33 -05:00
Nolan Brubaker
39bb3963ee Merge pull request #631 from skriss/restic-refactor-get-snapshot-id
use pkg/util/exec for running get snapshot id cmd
2018-06-29 17:03:46 -04:00
Steve Kriss
ae4aad0890 use pkg/util/exec for running get snapshot id cmd
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-29 12:06:31 -07:00
Andy Goldstein
1857257265 Merge pull request #621 from skriss/update-restic-docs
update restic doc
2018-06-29 14:04:22 -04:00
Andy Goldstein
eb19228d16 Merge pull request #626 from ncdc/add-backups-restores-to-non-restorable-resources
Don't restore backups or restores
2018-06-29 09:53:19 -04:00
Steve Kriss
afc9e9cde1 update restic documentation
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-28 15:35:12 -07:00
Andy Goldstein
fe286ff564 Don't restore backups or restores
Add backups and restores to the list of non-restorable resources. Backups,
if applicable, are synced from object storage by the backup sync
controller. Restores are specific to a cluster and don't have value
moving across clusters.

Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>
2018-06-28 16:58:09 -04:00
Andy Goldstein
1cc99ffa60 Merge pull request #625 from skriss/faq-bucket-per-cluster
add FAQ about using a bucket per cluster
2018-06-28 16:53:02 -04:00
Steve Kriss
31b8ff92df add FAQ about using a bucket per cluster
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-28 13:40:02 -07:00
Andy Goldstein
eaeb9d677e Merge pull request #608 from skriss/no-pv-snapshot-if-restic-backup
don't snapshot volumes that have been backed up with restic
2018-06-28 13:27:30 -04:00
Steve Kriss
11c176c490 don't snapshot volumes that have been backed up with restic
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-28 10:19:34 -07:00
Andy Goldstein
539de6d361 Merge pull request #564 from nrb/backup-timing
Record backup start and completion times, add timing metrics
2018-06-28 11:37:39 -04:00
Nolan Brubaker
96b72acb2d Record backup start and completion times
Signed-off-by: Nolan Brubaker <nolan@heptio.com>
2018-06-28 11:18:14 -04:00
Andy Goldstein
fa470170cf Merge pull request #610 from skriss/restic-skip-invalid-volumes
log and skip backups for non-existent & hostPath volumes
2018-06-28 10:49:33 -04:00
Nolan Brubaker
75a9879774 Merge pull request #620 from ncdc/gitignore
Ignore more files
2018-06-28 10:44:45 -04:00
Nolan Brubaker
a5722262d1 Merge pull request #619 from ncdc/skip-mirror-pods
Stop restoring mirror pods
2018-06-28 10:43:44 -04:00
Andy Goldstein
dd7bdf05f3 Ignore more files
Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>
2018-06-28 10:11:23 -04:00
Andy Goldstein
255a991c6e Stop restoring mirror pods
Mirror pods are pods created from static manifest files on a node.
They're mirrored to the apiserver so they're visible when querying the
apiserver for a list of pods, but it's not possible to send a pod
containing the mirror pod annotation to the apiserver and have it be
created successfully. Instead of trying to do this, log a message that
we're skipping restoring the pod because it's a mirror pod.

Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>
2018-06-28 10:06:55 -04:00
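A minimal sketch of the skip check described above, assuming the restore code handles pods as unstructured objects; the helper name is illustrative:

```go
package restore

import "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

// mirrorPodAnnotation is the annotation the kubelet places on mirror pods
// (v1.MirrorPodAnnotationKey in k8s.io/api/core/v1).
const mirrorPodAnnotation = "kubernetes.io/config.mirror"

// isMirrorPod reports whether a backed-up pod is a mirror pod; such pods
// are logged and skipped, since the apiserver rejects creating them.
func isMirrorPod(obj *unstructured.Unstructured) bool {
	_, found := obj.GetAnnotations()[mirrorPodAnnotation]
	return found
}
```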
Steve Kriss
781b7cd1aa log and skip backups for non-existent & hostPath volumes
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-27 14:37:36 -07:00
Andy Goldstein
51298f84cc Merge pull request #606 from skriss/restic-auto-manage-repos
Automatically manage restic repos
2018-06-27 17:01:27 -04:00
Steve Kriss
22e8f23e2c replace ark restic repo init cmd with automatic initialization of repos
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-27 13:50:37 -07:00
Andy Goldstein
e015238e6d Merge pull request #570 from skriss/restic-controller-improvements
Restic controller improvements
2018-06-27 16:44:00 -04:00
Steve Kriss
a697ad164e refine what gets enqueued in PVB/PVR controllers, and log better
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-27 13:31:11 -07:00
Nolan Brubaker
29ac0b4a6c Merge pull request #613 from ncdc/ignore-reclaim-policy-delete-pvs-without-snapshots
Skip reclaim policy Delete PVs without snapshots
2018-06-26 16:52:01 -04:00
Andy Goldstein
ee5afe148c Merge pull request #566 from runyontr/master
Minio should support read-only file systems
2018-06-26 13:43:02 -04:00
Andy Goldstein
7c283e5de8 Skip reclaim policy Delete PVs without snapshots
If a PV has a reclaim policy of Delete and we didn't create a snapshot
of it, don't restore the PV, as doing so would create a PV whose
underlying volume is incorrect.

Also "reset" any PVCs bound to the PV so they'll be dynamically
provisioned when restored.

Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>
2018-06-26 10:40:49 -04:00
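A sketch of the two behaviors described in the commit message, assuming typed PV/PVC objects; the helper names and the snapshot lookup are illustrative, not Ark's actual code:

```go
package restore

import corev1 "k8s.io/api/core/v1"

// shouldRestorePV reports whether a backed-up PV can be restored safely.
// A PV with reclaim policy Delete whose volume was never snapshotted no
// longer exists, so restoring the object would point at a missing volume.
func shouldRestorePV(pv *corev1.PersistentVolume, hasSnapshot bool) bool {
	return pv.Spec.PersistentVolumeReclaimPolicy != corev1.PersistentVolumeReclaimDelete || hasSnapshot
}

// resetPVC clears binding state so the claim is dynamically re-provisioned
// on restore instead of binding to the skipped PV. The annotation names are
// the standard bind-tracking keys used by the PV controller.
func resetPVC(pvc *corev1.PersistentVolumeClaim) {
	pvc.Spec.VolumeName = ""
	delete(pvc.Annotations, "pv.kubernetes.io/bind-completed")
	delete(pvc.Annotations, "pv.kubernetes.io/bound-by-controller")
}
```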
Andy Goldstein
5e28f322cf Merge pull request #580 from nikhita/add-omitempty-tags
Add omitempty tags to optional API fields
2018-06-25 10:32:22 -04:00
Nikhita Raghunath
0da5f1ccca Add omitempty tag to optional API fields
Signed-off-by: Nikhita Raghunath <nikitaraghunath@gmail.com>
2018-06-25 16:40:43 +05:30
Nolan Brubaker
6426706390 Merge pull request #571 from skriss/restic-aws-region
restic: if S3, get bucket's region up-front
2018-06-22 16:59:32 -04:00
Andy Goldstein
636b09a548 Merge pull request #569 from skriss/fix-mount-propagation
restic: use HostToContainer mount propagation for host-pods volume
2018-06-22 06:23:30 -04:00
Steve Kriss
5ad21854f7 restic: if S3, get bucket's region up-front
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-21 17:20:53 -07:00
Steve Kriss
57c5485501 restic: use HostToContainer mount prop. for host-pods volume
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-21 16:32:44 -07:00
Thomas Runyon
e4856d17ca Merge branch 'master' of github.com:heptio/ark
Signed-off-by: Thomas Runyon <runyontr@gmail.com>
2018-06-21 16:50:49 -04:00
Thomas Runyon
fb0696d0c3 Deploy minio to support read-only file systems
Signed-off-by: Thomas Runyon <runyontr@gmail.com>
2018-06-21 16:46:45 -04:00
Andy Goldstein
13344076c2 Merge pull request #563 from skriss/restic-cache-dir
add a /scratch emptyDir to ark pods to use for restic cache
2018-06-20 14:59:04 -04:00
Steve Kriss
25d3597c9a add a /scratch emptyDir to ark pods to use for restic cache
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-20 11:48:09 -07:00
Nolan Brubaker
4a7457ecfe Merge pull request #557 from skriss/move-repo-id-source
Use ResticRepository's resticIdentifier field as the source of truth for repo IDs
2018-06-20 13:50:21 -04:00
Steve Kriss
f2072e5868 use repo identifier from ResticRepo CR when backing up/restoring
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-20 10:42:53 -07:00
Andy Goldstein
8306566216 Merge pull request #532 from skriss/restic-repo-crd
add ResticRepository CRD and move repo-level actions to a controller
2018-06-20 13:42:32 -04:00
Steve Kriss
a927906e52 add --maintenance-frequency flag to ark restic repo init
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-20 09:21:43 -07:00
Steve Kriss
6e9e653f76 add restic repo getter and reorg restic cmds
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-20 09:21:43 -07:00
Steve Kriss
3481618324 move runCommand to pkg/util/exec and use in restic repo mgr
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-20 09:21:43 -07:00
Steve Kriss
65ed8da4b7 add ResticRepository CRD and move repo-level actions to a controller
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-20 09:21:43 -07:00
Andy Goldstein
f1e82a2fe3 Merge pull request #561 from skriss/enable-local-restic
move restic binary into /usr/bin so it's in PATH
2018-06-19 14:59:24 -04:00
Steve Kriss
de12ca4882 move restic binary into /usr/bin so it's in PATH
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-19 11:31:27 -07:00
Steve Kriss
01b5828ee7 Merge pull request #403 from nrb/merge-serviceaccount-secrets
Merge serviceaccounts on restore
2018-06-15 09:46:26 -07:00
Nolan Brubaker
e7d00cf5fd Add merge support for serviceaccounts
All properties from a backup will be merged into the ServiceAccount
except for the default token secret.

Signed-off-by: Nolan Brubaker <nolan@heptio.com>
2018-06-15 12:40:59 -04:00
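A simplified sketch of that merge rule. The "<name>-token-" prefix convention for default token secrets matches how Kubernetes names them, but the real implementation may identify the token secret differently:

```go
package restore

import (
	"strings"

	corev1 "k8s.io/api/core/v1"
)

// mergeServiceAccount copies secrets and image pull secrets from the
// backed-up service account into the one already in the cluster, skipping
// the backed-up default token secret, since the target cluster has
// generated its own.
func mergeServiceAccount(existing, backup *corev1.ServiceAccount) {
	tokenPrefix := backup.Name + "-token-"
	for _, s := range backup.Secrets {
		if strings.HasPrefix(s.Name, tokenPrefix) {
			continue // default token secret: keep the cluster's own
		}
		existing.Secrets = append(existing.Secrets, s)
	}
	existing.ImagePullSecrets = append(existing.ImagePullSecrets, backup.ImagePullSecrets...)
}
```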
Steve Kriss
2dfa7a1a72 Merge pull request #553 from ncdc/log-to-stdout
Log to stdout instead of stderr
2018-06-15 08:58:08 -07:00
Andy Goldstein
845c9cfa61 Merge pull request #555 from skriss/simplify-restores
get rid of restore staging dir by backing up/restoring within volume dir
2018-06-14 17:15:35 -04:00
Steve Kriss
6fb11b8087 get rid of restore staging dir by backing up/restoring within volume dir
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-14 13:49:27 -07:00
Andy Goldstein
cc9140b3cc Log to stdout instead of stderr
Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>
2018-06-14 14:02:12 -04:00
Andy Goldstein
7be81fe60e Merge pull request #546 from skriss/rm-old-donefiles
remove existing .ark dir and contents during restic restores
2018-06-14 11:12:31 -04:00
Andy Goldstein
bc20398119 Merge pull request #551 from ashish-amarnath/update-metrics-examples
Expose metrics server port from container
2018-06-14 07:36:46 -04:00
Ashish Amarnath
05e86ee734 expose container ports in example deployment and pkg/install tooling
Signed-off-by: Ashish Amarnath <ashish.amarnath@gmail.com>
2018-06-13 19:22:09 -07:00
Steve Kriss
dc273e3bed remove existing .ark dir and contents during restic restores
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-13 16:52:06 -07:00
Nolan Brubaker
11c3837f9b Merge pull request #548 from skriss/update-restic-docs
remove references to a specific ark version for using restic
2018-06-13 17:10:02 -04:00
Steve Kriss
5d7969f4b9 remove references to a specific ark version for using restic
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-13 14:04:55 -07:00
Andy Goldstein
9245e9d5dc Merge pull request #547 from skriss/restic-rsync
use rsync to copy restored data from staging dir to volume dir
2018-06-13 16:56:42 -04:00
Steve Kriss
f7a42f378f use rsync to copy restored data from staging dir to volume dir
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-13 13:41:55 -07:00
Andy Goldstein
d1e3688468 Merge pull request #531 from ashish-amarnath/add-backup-metrics
Add metrics package to publish metrics
2018-06-13 16:33:30 -04:00
Ashish Amarnath
83658e891e Add a metrics package to add and expose metrics
* add a metrics package to handle metric registration and publishing
* add a metricsAddress field to the server struct
* make metrics a part of the server
* start a metrics endpoint as part of starting the controllers
* instrument backup_controller to report metrics
* update cli-reference docs
* update example deployments with prometheus annotations
* update 'pkg/install' tooling with prometheus annotations

Signed-off-by: Ashish Amarnath <ashish.amarnath@gmail.com>
2018-06-13 13:17:08 -07:00
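The endpoint from the last two bullets is the standard promhttp handler; a minimal sketch, with the listen address purely illustrative of the `metricsAddress` field described above:

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Serve the default Prometheus registry on /metrics.
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8085", mux))
}
```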
Andy Goldstein
3db7c038a5 Merge pull request #545 from skriss/update-restic-instructions
update restic setup instructions
2018-06-12 15:23:50 -04:00
Steve Kriss
6236085327 update restic setup instructions
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-12 11:59:53 -07:00
Andy Goldstein
6da32a4955 Merge pull request #544 from ae-v/master
fix typo and whitespace chars in docs
2018-06-12 09:34:26 -04:00
Andre Veelken
0e0ac10388 fix typo and whitespace chars in docs
Signed-off-by: Andre Veelken
2018-06-12 15:17:28 +02:00
Steve Kriss
bd0b874631 Merge pull request #542 from ncdc/makefile-tag-latest-bool
Add TAG_LATEST support to Makefile
2018-06-11 12:51:17 -07:00
Andy Goldstein
a522a96789 Add TAG_LATEST support to Makefile
Only tag an image as "latest" if TAG_LATEST is set to "true".

Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>
2018-06-11 15:00:15 -04:00
Andy Goldstein
3177140db0 Merge pull request #541 from skriss/rename-init-container
rename restic-init-container to ark-restic-restore-helper
2018-06-11 13:57:14 -04:00
Steve Kriss
d7134b1df2 use Ark version as the tag for the restore-helper image in init container
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-11 10:46:10 -07:00
Steve Kriss
81520a9b86 rename restic-init-container to ark-restic-restore-helper
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-11 10:24:23 -07:00
Andy Goldstein
453b0a04f4 Merge pull request #540 from skriss/restic-v0.9.1
upgrade to restic v0.9.1
2018-06-11 13:16:17 -04:00
Andy Goldstein
459fe663ee Merge pull request #534 from skriss/rm-complete-restore-sh
replace complete-restore.sh with go code
2018-06-11 13:00:46 -04:00
Steve Kriss
50816ba23b upgrade to restic v0.9.1
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-11 09:59:29 -07:00
Steve Kriss
2c6fc5bd90 don't fail the restore if cleanup of the staging directory post-restore fails
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-11 09:48:26 -07:00
Steve Kriss
6897c2f901 move dotfiles from staging to volume dir on restore
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-11 09:31:03 -07:00
Steve Kriss
e354b1c130 replace complete-restore.sh with go code
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-11 09:31:03 -07:00
Andy Goldstein
e1cf244592 Merge pull request #537 from skriss/optimize-dockerfile
Dockerfile: move restic fetch before ark add so it can be cached
2018-06-11 12:28:26 -04:00
Andy Goldstein
d9924e0f3f Merge pull request #535 from skriss/rename-restic-daemonset
rename ark daemonset to ark restic server
2018-06-11 12:28:05 -04:00
Steve Kriss
c1f4e6d92d rename ark daemonset to ark restic server
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-11 09:12:49 -07:00
Steve Kriss
e7453ebc98 Dockerfile: move restic fetch before ark add so it can be cached
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-08 15:02:47 -07:00
Andy Goldstein
0396ca1dee Merge pull request #526 from yastij/ignore-resources-terminating
ignore terminating resources while doing a backup
2018-06-08 11:10:59 -04:00
Yassine TIJANI
17f6a14d37 ignore terminating resources while doing a backup
Signed-off-by: Yassine TIJANI <yasstij11@gmail.com>
2018-06-08 16:49:35 +02:00
Andy Goldstein
c0cf61912d Merge pull request #533 from skriss/fix-init-repo-key-data
ark restic init-repository: use key data if provided
2018-06-07 13:39:18 -04:00
Steve Kriss
67b40c7fc8 use fake filesystem to test key file flag for ark restic init-repo
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-07 10:29:59 -07:00
Steve Kriss
e2561f9073 ark restic init-repository: use key data if provided
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-07 10:06:15 -07:00
Andy Goldstein
e3d6902ede Merge pull request #530 from skriss/restic-quickstart-doc
restic quickstart doc for alpha testing
2018-06-06 15:33:43 -04:00
Steve Kriss
68020d0e4b restic quickstart doc for alpha testing
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-06 12:25:09 -07:00
Andy Goldstein
ed2d7b445c Merge pull request #508 from skriss/real-restic
restic integration with Ark
2018-06-06 13:24:56 -04:00
Steve Kriss
50d4084fac add restic integration for doing pod volume backups/restores
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-06 09:48:10 -07:00
Steve Kriss
c2c5b9040c add kube listers/informers to vendor/
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-06-04 14:52:35 -07:00
Wayne Witzel III
6f62749c1a Merge pull request #520 from ncdc/fix-backup-pv-action-when-volumeName-is-missing
BackupItemActionPlugin: handle nil updatedItem
2018-06-01 14:28:05 -04:00
Andy Goldstein
86b9cc6d15 BackupItemActionPlugin: handle nil updatedItem
Handle the case where a BackupItemAction may return nil for updatedItem,
meaning "no modifications to the item". The backupPVAction does this,
and we were panicking instead of accepting it.

Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>
2018-06-01 14:03:32 -04:00
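A minimal sketch of the fix's semantics; the map-based item type and action signature are simplified stand-ins for Ark's plugin interface:

```go
package backup

// applyAction runs a backup item action and treats a nil result as
// "no modifications to the item".
func applyAction(
	item map[string]interface{},
	action func(map[string]interface{}) (map[string]interface{}, error),
) (map[string]interface{}, error) {
	updated, err := action(item)
	if err != nil {
		return nil, err
	}
	if updated == nil {
		// The action made no changes; keep the original item instead of
		// dereferencing nil downstream.
		return item, nil
	}
	return updated, nil
}
```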
Andy Goldstein
4e2a77d683 Merge pull request #518 from mattkelly/readme-selector-fix
Fix selector syntax in backup example
2018-05-29 10:10:34 -04:00
Matt Kelly
ca83f000ea Fix selector syntax in backup example
Signed-off-by: Matt Kelly <matt.kelly@containership.io>
2018-05-29 10:01:57 -04:00
Andy Goldstein
10d6dd006a Merge pull request #517 from c-knowles/bug/doc-default-resourcePriorities
Correct default restorePriorities docs
2018-05-26 08:47:00 -04:00
Chris Knowles
4065c0f194 Add limitRanges to default restorePriorities doc
Signed-off-by: Chris Knowles <c-knowles@users.noreply.github.com>
2018-05-26 20:23:23 +08:00
Chris Knowles
5643e8ebb5 Correct default restorePriorities
Changed in https://github.com/heptio/ark/pull/258

Signed-off-by: Chris Knowles <c-knowles@users.noreply.github.com>
2018-05-26 20:09:46 +08:00
Andy Goldstein
6dbde599bf Merge pull request #514 from carlpett/post-hooks-after-error
Run post-hooks even if backup actions fail
2018-05-23 13:37:28 -04:00
Calle Pettersson
b2ec87f05f Run post-hooks even if backup actions fail
Signed-off-by: Calle Pettersson <cpettsson@gmail.com>
2018-05-23 18:44:55 +02:00
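The shape of that change, sketched with placeholder hook/action functions; errors are accumulated so a failed action no longer prevents the post-hook (e.g. unfreezing an application quiesced by the pre-hook):

```go
package backup

// executeWithHooks runs the pre-hook, the backup action, then the
// post-hook. The post-hook deliberately runs even when the action fails.
func executeWithHooks(pre, action, post func() error) []error {
	var errs []error
	if err := pre(); err != nil {
		return []error{err}
	}
	if err := action(); err != nil {
		errs = append(errs, err)
	}
	// Not gated on the action's success.
	if err := post(); err != nil {
		errs = append(errs, err)
	}
	return errs
}
```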
Steve Kriss
fb33d93186 Merge pull request #512 from carlpett/additional-backup-error
Handle errors in additionalItemBackupper
2018-05-23 09:02:20 -07:00
Steve Kriss
170034787d Merge pull request #513 from ncdc/510-follow-ups
Fix copyright date, add happy path test case
2018-05-23 08:54:22 -07:00
Calle Pettersson
b92d086712 Add additional test
Signed-off-by: Calle Pettersson <cpettsson@gmail.com>
2018-05-23 17:28:51 +02:00
Andy Goldstein
18e2401e79 Merge pull request #455 from skriss/remove-aws-region
AWS: remove region requirement from config for backupStorageProvider
2018-05-23 11:02:57 -04:00
Andy Goldstein
849297e623 Fix copyright date, add happy path test case
Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>
2018-05-23 09:59:32 -04:00
Andy Goldstein
92e9d307a5 Merge pull request #510 from nrb/failed-backup-upload-491
Fail backup if object store upload fails
2018-05-23 09:55:11 -04:00
Nolan Brubaker
09bbe072cd Check errors when closing GCP objects
Writing to GCP's object store is an async operation, so errors need to
be checked both on write and close calls, since errors like permission
violations aren't reported until a close.

Signed-off-by: Nolan Brubaker <nolan@heptio.com>
2018-05-22 17:38:49 -04:00
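A sketch of the corrected upload path using the cloud.google.com/go/storage client; the function and parameter names are illustrative:

```go
package gcp

import (
	"context"
	"io"

	"cloud.google.com/go/storage"
)

// putObject uploads body to bucket/key. Because GCS uploads are
// asynchronous, errors such as permission violations may not surface
// until Close, so both the copy and the close must be checked.
func putObject(ctx context.Context, client *storage.Client, bucket, key string, body io.Reader) error {
	w := client.Bucket(bucket).Object(key).NewWriter(ctx)
	if _, err := io.Copy(w, body); err != nil {
		w.Close() // best effort; the copy error is the primary failure
		return err
	}
	return w.Close()
}
```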
Andy Goldstein
5ff582ec42 Merge pull request #501 from dhananjaysathe/downstream
Add documentation on how to use a selector negation
2018-05-22 17:24:42 -04:00
Steve Kriss
b029860b46 AWS: remove region requirement from config for backupStorageProvider
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-05-22 11:59:53 -07:00
Andy Goldstein
8ce2006814 Merge pull request #506 from marpaia/config-options
Add functional options for the Ark config install library
2018-05-22 14:19:45 -04:00
Calle Pettersson
24dfef6f15 Handle errors in additionalItemBackupper
Signed-off-by: Calle Pettersson <cpettsson@gmail.com>
2018-05-22 18:32:48 +02:00
Mike Arpaia
cb7bcea5c3 Add functional options for the Ark config install library
Signed-off-by: Mike Arpaia <mike@arpaia.co>
2018-05-22 09:21:12 -06:00
Andy Goldstein
aeb5f6d832 Merge pull request #437 from marpaia/install-api
Add library code to install required server components
2018-05-18 16:48:42 -04:00
Mike Arpaia
16f707aa11 Add library code to install required server components
This PR includes Go library code to assist with the installation of
required server-side components.

Signed-off-by: Mike Arpaia <mike@arpaia.co>
2018-05-18 14:43:06 -06:00
Andy Goldstein
2fde1f5fc1 Merge pull request #500 from nrb/fix-497
Exit when failing to update a backup's phase
2018-05-17 14:23:41 -04:00
Andy Goldstein
3a746a3f73 Merge pull request #496 from nrb/fix-477
Check restore Phase before downloading logs
2018-05-17 13:23:23 -04:00
Andy Goldstein
f288902e3e Merge pull request #490 from skriss/jsonpatch
Use JSON merge patches everywhere
2018-05-17 13:02:47 -04:00
Nolan Brubaker
7eac6675e8 Exit when failing to update a backup's phase
Signed-off-by: Nolan Brubaker <nolan@heptio.com>
2018-05-17 12:47:38 -04:00
Nolan Brubaker
f6761ddd00 Check restore Phase before downloading logs
Signed-off-by: Nolan Brubaker <nolan@heptio.com>
2018-05-17 12:20:55 -04:00
Dhananjay Sathe
e313d6200a Add documentation on how to use a selector negation
This will help users use the `--selector` flag to selectively exclude objects from being backed up by Ark.

workaround for #404 until dedicated flags are implemented

Signed-off-by: Dhananjay Sathe <dhanajaysathe@gmail.com>
2018-05-17 21:21:39 +05:30
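The documented workaround relies on Kubernetes set-based selector syntax, where `notin` also matches objects missing the key entirely. A small sketch using the apimachinery labels package to illustrate the semantics; the label names are examples:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Selects everything except objects labeled component=database.
	sel, err := labels.Parse("component notin (database)")
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.Matches(labels.Set{"component": "database"})) // false: excluded
	fmt.Println(sel.Matches(labels.Set{"component": "web"}))      // true
	fmt.Println(sel.Matches(labels.Set{"app": "nginx"}))          // true: key absent
}
```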
Steve Kriss
5d74a92cf1 Merge pull request #484 from nrb/move-groupresources
Consolidate commonly used GroupResource objects
2018-05-15 14:02:29 -07:00
Nolan Brubaker
f936c55a37 Consolidate commonly used GroupResource objects
Signed-off-by: Nolan Brubaker <nolan@heptio.com>
2018-05-15 15:55:15 -04:00
Andy Goldstein
ad93135adb Merge pull request #483 from skriss/graceful-shutdown-on-sigs
shutdown gracefully on SIGINT/SIGTERM
2018-05-15 14:33:09 -04:00
Andy Goldstein
67263d2652 Merge pull request #486 from nrb/fix-475
Filter on resources that support get & delete
2018-05-15 10:58:56 -04:00
Nolan Brubaker
4fcd222777 Expand required resource verbs with get and delete
Some resources use GET for listing, which resulted in errors.

Signed-off-by: Nolan Brubaker <nolan@heptio.com>
2018-05-15 10:55:03 -04:00
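A sketch of the expanded verb filter against discovery data; the helper name is illustrative:

```go
package backup

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

// supportsRequiredVerbs reports whether a discovered resource supports
// every verb needed; after this change the required set is list, create,
// get, and delete.
func supportsRequiredVerbs(resource metav1.APIResource) bool {
	return sets.NewString(resource.Verbs...).HasAll("list", "create", "get", "delete")
}
```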
Steve Kriss
6d6f734bc9 use json merge patches
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-05-14 14:34:24 -07:00
Andy Goldstein
014c0e2c4c Merge pull request #485 from skriss/fix-cohab
use new cohabitatingResources map for each backup
2018-05-14 15:02:33 -04:00
Steve Kriss
1c950aa17b unit test to ensure new cohabitatingResources map for each backup
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-05-14 10:23:21 -07:00
Steve Kriss
ed7fbc9178 use new cohabitatingResources map for each backup
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-05-11 16:30:14 -07:00
Steve Kriss
20f56e9868 cleanup plugin subprocesses on server exit
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-05-11 09:48:46 -07:00
Andy Goldstein
5bfd4f64db Merge pull request #482 from skriss/add-cohab-resources
add replicasets and daemonsets to cohabitating resources
2018-05-11 10:02:10 -04:00
Steve Kriss
09c20b51e6 shutdown gracefully on SIGINT/SIGTERM
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-05-10 16:22:14 -07:00
Steve Kriss
fbb5ead4e9 add replicasets and daemonsets to cohabitating resources
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-05-10 14:15:01 -07:00
Andy Goldstein
9fc1711d45 Merge pull request #480 from skriss/fix-azure-setup
Azure: fix issues with setup instructions on macOS
2018-05-09 16:56:51 -04:00
Steve Kriss
c4d1e705d3 Azure: fix issues with setup instructions on macOS
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-05-09 12:56:19 -07:00
Andy Goldstein
43b1f9a19e Merge pull request #418 from skriss/refactor-patch-tests
use typed structs for decoding patch JSON in unit tests
2018-05-09 15:44:21 -04:00
Andy Goldstein
ea83ed32f5 Merge pull request #470 from skriss/backup-cluster-roles-and-bindings
backups: include clusterroles/bindings that reference serviceaccounts
2018-05-09 13:21:13 -04:00
Steve Kriss
041cfc2173 backups: include clusterroles/bindings that reference serviceaccounts
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-05-09 10:03:55 -07:00
Andy Goldstein
50a5550291 Merge pull request #478 from skriss/osx-build-perf-fix
when building on macOS, bind-mount volumes with delegated config
2018-05-09 10:36:14 -04:00
Andy Goldstein
ef5ac7fd05 Merge pull request #463 from nrb/fix-454
Don't restore completed pods or jobs
2018-05-09 09:59:34 -04:00
Andy Goldstein
3f3deda3d4 Merge pull request #479 from skriss/image-tagging-doc
add image-tagging policy doc
2018-05-09 09:15:38 -04:00
Steve Kriss
9e521aa757 add image-tagging policy doc
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-05-08 16:09:19 -07:00
Steve Kriss
a280e8cfd2 when building on macOS, bind-mount volumes with delegated config
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-05-07 16:00:03 -07:00
Andy Goldstein
c4bb6501ca Merge pull request #476 from skriss/dep-cleanup
Dep cleanup
2018-05-07 23:33:06 +02:00
Steve Kriss
9affb3c92a clean up/organize Gopkg.toml
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-05-07 10:02:59 -07:00
Steve Kriss
e81de2491f remove duplicate github.com/satori/go.uuid dependency
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-05-07 10:02:59 -07:00
Steve Kriss
51928e9177 use typed structs for decoding patch JSON in unit tests
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-05-07 09:22:20 -07:00
Andy Goldstein
5d8d221157 Merge pull request #472 from skriss/sync-ns-bug
when syncing backups, set their namespace to current cluster's Ark ns
2018-05-07 18:16:47 +02:00
Steve Kriss
6754955bcd when syncing backups, set their namespace to current cluster's Ark ns
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-05-07 09:12:51 -07:00
Andy Goldstein
fbda82ed63 Merge pull request #456 from skriss/go-1.10
upgrade to go 1.10 / alpine 3.7
2018-05-07 17:32:07 +02:00
Nolan Brubaker
923870390b Skip completed jobs and pods when restoring
Completed jobs and pods may be useful in the backup for auditing
purposes, but don't recreate them when restoring.

Signed-off-by: Nolan Brubaker <nolan@heptio.com>
2018-05-02 12:16:54 -04:00
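A sketch of the completion checks implied by the commit message, using typed pod and job objects; the exact conditions Ark checks may differ:

```go
package restore

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// podIsComplete reports whether a pod has finished running and therefore
// should not be recreated on restore.
func podIsComplete(pod *corev1.Pod) bool {
	return pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed
}

// jobIsComplete reports whether a job has finished; completed jobs stay
// in the backup for auditing but are skipped on restore.
func jobIsComplete(job *batchv1.Job) bool {
	return job.Status.CompletionTime != nil
}
```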
Jennifer Rondeau
96b0808e3a Merge pull request #460 from skriss/debugging-docs
add installation troubleshooting doc with common issues
2018-04-27 09:26:56 -04:00
Andy Goldstein
e6624506cf Merge pull request #465 from castrojo/update-slack
Add a direct link to the slack channel
2018-04-26 15:16:10 -04:00
Jorge O. Castro
6d46b5f1eb Add a direct link to the slack channel
Signed-off-by: Jorge O. Castro <jorge.castro@gmail.com>
2018-04-26 15:00:14 -04:00
Steve Kriss
193fdb7026 add installation troubleshooting doc with common issues
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-04-25 11:22:55 -07:00
Andy Goldstein
b6316aff70 Merge pull request #458 from gianrubio/review-docs
Fix invalid file
2018-04-25 07:13:50 -04:00
Giancarlo Rubio
1f7d5c18f2 Fix invalid file
Signed-off-by: Giancarlo Rubio <gianrubio@gmail.com>
2018-04-25 13:07:40 +02:00
Steve Kriss
fd1c8294ce use the go 1.10 build cache
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-04-24 13:12:54 -07:00
Steve Kriss
2889db72ac update to go 1.10 & alpine 3.7
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-04-24 13:12:54 -07:00
Andy Goldstein
18d6b233da Merge pull request #453 from nrb/fix-444
Make empty excludes string more accurate
2018-04-24 14:53:33 -04:00
Andy Goldstein
c8989231eb Merge pull request #417 from skriss/upgrade-k8s-deps
Upgrade apimachinery, client-go, api, kubernetes dependencies
2018-04-24 14:53:11 -04:00
Nolan Brubaker
05cb059b1a Make empty excludes string more accurate
Signed-off-by: Nolan Brubaker <nolan@heptio.com>
2018-04-24 14:34:56 -04:00
Andy Goldstein
ff6e9dd2f3 Merge pull request #452 from mattmoyer/update-build-image
Switch to upstream Go build images.
2018-04-24 11:07:28 -04:00
Matt Moyer
deae0e6ae1 Switch to upstream Go build images.
These internal `gcr.io/heptio-images/golang` images are deprecated. It looks like `git` and `bash` are the only things the Ark build needed that aren't in the upstream `golang:1.9-alpine3.6` image.

Signed-off-by: Matt Moyer <moyer@heptio.com>
2018-04-24 09:53:23 -05:00
Steve Kriss
3dc093c24a regenerate code with updated generator
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-04-20 12:59:22 -07:00
Steve Kriss
c7b52bf1fe upgrade build image to v1.10 dependencies
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-04-20 12:59:22 -07:00
Steve Kriss
989169dcfe remove unused kubernetes/pkg/util/version dependency
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-04-20 12:59:22 -07:00
Steve Kriss
31645d163e remove Gopkg.toml override for blackfriday (not needed)
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-04-20 12:59:10 -07:00
Steve Kriss
97e52f2b3c add timeout arg to rest.NewRequest()
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-04-20 12:58:53 -07:00
Steve Kriss
961d7f2924 kubernetes/pkg/printers/ShortHumanDuration -> apimachinery/pkg/util/duration
Signed-off-by: Steve Kriss <steve@heptio.com>
2018-04-20 12:58:53 -07:00
Steve Kriss
f87280d369 upgrade apimachinery, client-go, kubernetes, api deps
Update k8s.io/api to v1.10.0
Update k8s.io/apimachinery to v1.10.0
Update k8s.io/client-go to v7.0
Update k8s.io/kubernetes to v1.10

Signed-off-by: Steve Kriss <steve@heptio.com>
2018-04-20 12:58:20 -07:00
1011 changed files with 79792 additions and 30919 deletions

.gitignore

@@ -32,3 +32,9 @@ debug
.container-*
.vimrc
.go
.DS_Store
.push-*
.vscode
*.diff
_site/

.travis.yml

@@ -1,7 +1,7 @@
language: go
go:
- 1.9.x
- 1.10.x
sudo: required

CHANGELOG.md

@@ -1,5 +1,73 @@
# Changelog
#### [v0.9.3](https://github.com/heptio/ark/releases/tag/v0.9.3) - 2018-08-10
##### Bug Fixes:
* Initialize Prometheus metrics when creating a new schedule (#689, @lemaral)
#### [v0.9.2](https://github.com/heptio/ark/releases/tag/v0.9.2) - 2018-07-26
##### Bug Fixes:
* Fix issue where modifications made by backup item actions were not being saved to backup tarball (#704, @skriss)
#### [v0.9.1](https://github.com/heptio/ark/releases/tag/v0.9.1) - 2018-07-23
##### Bug Fixes:
* Require namespace for Ark's CRDs to already exist at server startup (#676, @skriss)
* Require all Ark CRDs to exist at server startup (#683, @skriss)
* Fix `latest` tagging in Makefile (#690, @skriss)
* Make Ark compatible with clusters that don't have the `rbac.authorization.k8s.io/v1` API group (#682, @nrb)
* Don't consider missing snapshots an error during backup deletion, limit backup deletion requests per backup to 1 (#687, @skriss)
#### [v0.9.0](https://github.com/heptio/ark/releases/tag/v0.9.0) - 2018-07-06
##### Highlights:
* Ark now has support for backing up and restoring Kubernetes volumes using a free open-source backup tool called [restic](https://github.com/restic/restic).
This provides users an out-of-the-box solution for backing up and restoring almost any type of Kubernetes volume, whether or not it has snapshot support
integrated with Ark. For more information, see the [documentation](https://github.com/heptio/ark/blob/master/docs/restic.md).
* Support for Prometheus metrics has been added! View total number of backup attempts (including success or failure), total backup size in bytes, and backup
durations. More metrics coming in future releases!
##### All New Features:
* Add restic support (#508 #532 #533 #534 #535 #537 #540 #541 #545 #546 #547 #548 #555 #557 #561 #563 #569 #570 #571 #606 #608 #610 #621 #631 #636, @skriss)
* Add prometheus metrics (#531 #551 #564, @ashish-amarnath @nrb)
* When backing up a service account, include cluster roles/cluster role bindings that reference it (#470, @skriss)
* When restoring service accounts, copy secrets/image pull secrets into the target cluster even if the service account already exists (#403, @nrb)
##### Bug Fixes / Other Changes:
* Upgrade to Kubernetes 1.10 dependencies (#417, @skriss)
* Upgrade to go 1.10 and alpine 3.7 (#456, @skriss)
* Display no excluded resources/namespaces as `<none>` rather than `*` (#453, @nrb)
* Skip completed jobs and pods when restoring (#463, @nrb)
* Set namespace correctly when syncing backups from object storage (#472, @skriss)
* When building on macOS, bind-mount volumes with delegated config (#478, @skriss)
* Add replica sets and daemonsets to cohabitating resources so they're not backed up twice (#482 #485, @skriss)
* Shut down the Ark server gracefully on SIGINT/SIGTERM (#483, @skriss)
* Only back up resources that support GET and DELETE in addition to LIST and CREATE (#486, @nrb)
* Show a better error message when trying to get an incomplete restore's logs (#496, @nrb)
* Stop processing when setting a backup deletion request's phase to `Deleting` fails (#500, @nrb)
* Add library code to install Ark's server components (#437 #506, @marpaia)
* Properly handle errors when backing up additional items (#512, @carlpett)
* Run post hooks even if backup actions fail (#514, @carlpett)
* GCP: fail backup if upload to object storage fails (#510, @nrb)
* AWS: don't require `region` as part of backup storage provider config (#455, @skriss)
* Ignore terminating resources while doing a backup (#526, @yastij)
* Log to stdout instead of stderr (#553, @ncdc)
* Move sample minio deployment's config to an emptyDir (#566, @runyontr)
* Add `omitempty` tag to optional API fields (#580, @nikhita)
* Don't restore PVs with a reclaim policy of `Delete` and no snapshot (#613, @ncdc)
* Don't restore mirror pods (#619, @ncdc)
##### Docs Contributors:
* @gianrubio
* @castrojo
* @dhananjaysathe
* @c-knowles
* @mattkelly
* @ae-v
* @hamidzr
#### [v0.8.3](https://github.com/heptio/ark/releases/tag/v0.8.3) - 2018-06-29
##### Bug Fixes:
@@ -8,7 +76,7 @@
#### [v0.8.2](https://github.com/heptio/ark/releases/tag/v0.8.2) - 2018-06-01
##### Bug Fixes:
* Don't crash when a PVC is missing spec.volumeName (#520, @ncdc)
* Don't crash when a persistent volume claim is missing spec.volumeName (#520, @ncdc)
#### [v0.8.1](https://github.com/heptio/ark/releases/tag/v0.8.1) - 2018-04-23

Dockerfile-ark-restic-restore-helper.alpine

@@ -1,4 +1,4 @@
# Copyright 2017 the Heptio Ark contributors.
# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,14 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM alpine:3.6
FROM alpine:3.7
MAINTAINER Andy Goldstein <andy@heptio.com>
MAINTAINER Steve Kriss <steve@heptio.com>
RUN apk add --no-cache ca-certificates
ADD /bin/linux/amd64/ark /ark
ADD /bin/linux/amd64/ark-restic-restore-helper .
USER nobody:nobody
ENTRYPOINT ["/ark"]
ENTRYPOINT [ "/ark-restic-restore-helper" ]

Dockerfile-ark.alpine (new file)

@@ -0,0 +1,31 @@
# Copyright 2017 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM alpine:3.7
MAINTAINER Andy Goldstein <andy@heptio.com>
RUN apk add --no-cache ca-certificates
RUN apk add --update --no-cache bzip2 && \
wget --quiet https://github.com/restic/restic/releases/download/v0.9.1/restic_0.9.1_linux_amd64.bz2 && \
bunzip2 restic_0.9.1_linux_amd64.bz2 && \
mv restic_0.9.1_linux_amd64 /usr/bin/restic && \
chmod +x /usr/bin/restic
ADD /bin/linux/amd64/ark /ark
USER nobody:nobody
ENTRYPOINT ["/ark"]

Gopkg.lock

@@ -37,18 +37,6 @@
revision = "f6e08fe5e4d45c9a66e40196d3fed5f37331d224"
version = "v8.1.1"
[[projects]]
name = "github.com/PuerkitoBio/purell"
packages = ["."]
revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/PuerkitoBio/urlesc"
packages = ["."]
revision = "de5bf2ad457846296e2031421a34e2568e304e35"
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = [
@@ -87,6 +75,12 @@
revision = "1f8fb9d0919e5a58992207db9512a03f76ab0274"
version = "v1.13.12"
[[projects]]
branch = "master"
name = "github.com/beorn7/perks"
packages = ["quantile"]
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
[[projects]]
name = "github.com/cpuguy83/go-md2man"
packages = ["md2man"]
@@ -114,15 +108,6 @@
]
revision = "bc6354cbbc295e925e4c611ffe90c1f287ee54db"
[[projects]]
name = "github.com/emicklei/go-restful"
packages = [
".",
"log"
]
revision = "68c9750c36bb8cb433f1b88c807b4b30df4acc40"
version = "v2.2.1"
[[projects]]
branch = "master"
name = "github.com/evanphx/json-patch"
@@ -141,30 +126,6 @@
revision = "20b96f641a5ea98f2f8619ff4f3e061cff4833bd"
version = "v1.28.2"
[[projects]]
branch = "master"
name = "github.com/go-openapi/jsonpointer"
packages = ["."]
revision = "779f45308c19820f1a69e9a4cd965f496e0da10f"
[[projects]]
branch = "master"
name = "github.com/go-openapi/jsonreference"
packages = ["."]
revision = "36d33bfe519efae5632669801b180bf1a245da3b"
[[projects]]
branch = "master"
name = "github.com/go-openapi/spec"
packages = ["."]
revision = "3faa0055dbbf2110abc1f3b4e3adbb22721e96e7"
[[projects]]
branch = "master"
name = "github.com/go-openapi/swag"
packages = ["."]
revision = "f3f9494671f93fcff853e3c6e9e948b3eb71e590"
[[projects]]
name = "github.com/gogo/protobuf"
packages = [
@@ -193,12 +154,6 @@
]
revision = "ab9f9a6dab164b7d1246e0e688b0ab7b94d8553e"
[[projects]]
branch = "master"
name = "github.com/google/btree"
packages = ["."]
revision = "316fb6d3f031ae8f4d457c6c5186b9e3ded70435"
[[projects]]
branch = "master"
name = "github.com/google/gofuzz"
@@ -221,15 +176,6 @@
revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
version = "v0.1.0"
[[projects]]
branch = "master"
name = "github.com/gregjones/httpcache"
packages = [
".",
"diskcache"
]
revision = "c1f8028e62adb3d518b823a2f8e6a95c38bdd3aa"
[[projects]]
branch = "master"
name = "github.com/hashicorp/go-hclog"
@@ -287,20 +233,10 @@
version = "1.0.3"
[[projects]]
branch = "master"
name = "github.com/juju/ratelimit"
packages = ["."]
revision = "5b9ff866471762aa2ab2dced63c9fb6f53921342"
[[projects]]
branch = "master"
name = "github.com/mailru/easyjson"
packages = [
"buffer",
"jlexer",
"jwriter"
]
revision = "2f5df55504ebc322e4d52d34df6a1f5b503bf26d"
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
version = "v1.0.1"
[[projects]]
branch = "master"
@@ -308,18 +244,6 @@
packages = ["."]
revision = "a61a99592b77c9ba629d254a693acffaeb4b7e28"
[[projects]]
branch = "master"
name = "github.com/petar/GoLLRB"
packages = ["llrb"]
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
[[projects]]
name = "github.com/peterbourgon/diskv"
packages = ["."]
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
version = "v2.0.1"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
@@ -332,6 +256,42 @@
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
name = "github.com/prometheus/client_golang"
packages = [
"prometheus",
"prometheus/promhttp"
]
revision = "c5b7fccd204277076155f10851dad72b76a49317"
version = "v0.8.0"
[[projects]]
branch = "master"
name = "github.com/prometheus/client_model"
packages = ["go"]
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
[[projects]]
branch = "master"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model"
]
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
[[projects]]
branch = "master"
name = "github.com/prometheus/procfs"
packages = [
".",
"internal/util",
"nfs",
"xfs"
]
revision = "94663424ae5ae9856b40a9f170762b4197024661"
[[projects]]
name = "github.com/robfig/cron"
packages = ["."]
@@ -342,12 +302,6 @@
packages = ["."]
revision = "93622da34e54fb6529bfb7c57e710f37a8d9cbd8"
[[projects]]
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "879c5887cd475cd7864858769793b2ceb0d44feb"
version = "v1.1.0"
[[projects]]
name = "github.com/satori/uuid"
packages = ["."]
@@ -460,11 +414,16 @@
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
"width"
"unicode/rangetable"
]
revision = "e56139fd9c5bc7244c76116c68e500765bb6db6b"
[[projects]]
branch = "master"
name = "golang.org/x/time"
packages = ["rate"]
revision = "26559e0f760e39c24d730d3224364aef164ee23f"
[[projects]]
branch = "master"
name = "google.golang.org/api"
@@ -577,8 +536,17 @@
"storage/v1alpha1",
"storage/v1beta1"
]
revision = "af4bc157c3a209798fc897f6d4aaaaeb6c2e0d6a"
version = "kubernetes-1.9.0"
revision = "73d903622b7391f3312dcbac6483fed484e185f8"
version = "kubernetes-1.10.0"
[[projects]]
branch = "master"
name = "k8s.io/apiextensions-apiserver"
packages = [
"pkg/apis/apiextensions",
"pkg/apis/apiextensions/v1beta1"
]
revision = "07bbbb7a28a34c56bf9d1b192a88cc9b2350095e"
[[projects]]
name = "k8s.io/apimachinery"
@@ -590,7 +558,7 @@
"pkg/apis/meta/internalversion",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
"pkg/apis/meta/v1alpha1",
"pkg/apis/meta/v1beta1",
"pkg/conversion",
"pkg/conversion/queryparams",
"pkg/fields",
@@ -608,30 +576,28 @@
"pkg/util/cache",
"pkg/util/clock",
"pkg/util/diff",
"pkg/util/duration",
"pkg/util/errors",
"pkg/util/framer",
"pkg/util/httpstream",
"pkg/util/httpstream/spdy",
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/mergepatch",
"pkg/util/net",
"pkg/util/remotecommand",
"pkg/util/runtime",
"pkg/util/sets",
"pkg/util/strategicpatch",
"pkg/util/validation",
"pkg/util/validation/field",
"pkg/util/wait",
"pkg/util/yaml",
"pkg/version",
"pkg/watch",
"third_party/forked/golang/json",
"third_party/forked/golang/netutil",
"third_party/forked/golang/reflect"
]
revision = "180eddb345a5be3a157cea1c624700ad5bd27b8f"
version = "kubernetes-1.9.0"
revision = "302974c03f7e50f16561ba237db776ab93594ef6"
version = "kubernetes-1.10.0"
[[projects]]
name = "k8s.io/client-go"
@@ -639,6 +605,46 @@
"discovery",
"discovery/fake",
"dynamic",
"informers",
"informers/admissionregistration",
"informers/admissionregistration/v1alpha1",
"informers/admissionregistration/v1beta1",
"informers/apps",
"informers/apps/v1",
"informers/apps/v1beta1",
"informers/apps/v1beta2",
"informers/autoscaling",
"informers/autoscaling/v1",
"informers/autoscaling/v2beta1",
"informers/batch",
"informers/batch/v1",
"informers/batch/v1beta1",
"informers/batch/v2alpha1",
"informers/certificates",
"informers/certificates/v1beta1",
"informers/core",
"informers/core/v1",
"informers/events",
"informers/events/v1beta1",
"informers/extensions",
"informers/extensions/v1beta1",
"informers/internalinterfaces",
"informers/networking",
"informers/networking/v1",
"informers/policy",
"informers/policy/v1beta1",
"informers/rbac",
"informers/rbac/v1",
"informers/rbac/v1alpha1",
"informers/rbac/v1beta1",
"informers/scheduling",
"informers/scheduling/v1alpha1",
"informers/settings",
"informers/settings/v1alpha1",
"informers/storage",
"informers/storage/v1",
"informers/storage/v1alpha1",
"informers/storage/v1beta1",
"kubernetes",
"kubernetes/scheme",
"kubernetes/typed/admissionregistration/v1alpha1",
@@ -669,8 +675,35 @@
"kubernetes/typed/storage/v1",
"kubernetes/typed/storage/v1alpha1",
"kubernetes/typed/storage/v1beta1",
"listers/admissionregistration/v1alpha1",
"listers/admissionregistration/v1beta1",
"listers/apps/v1",
"listers/apps/v1beta1",
"listers/apps/v1beta2",
"listers/autoscaling/v1",
"listers/autoscaling/v2beta1",
"listers/batch/v1",
"listers/batch/v1beta1",
"listers/batch/v2alpha1",
"listers/certificates/v1beta1",
"listers/core/v1",
"listers/events/v1beta1",
"listers/extensions/v1beta1",
"listers/networking/v1",
"listers/policy/v1beta1",
"listers/rbac/v1",
"listers/rbac/v1alpha1",
"listers/rbac/v1beta1",
"listers/scheduling/v1alpha1",
"listers/settings/v1alpha1",
"listers/storage/v1",
"listers/storage/v1alpha1",
"listers/storage/v1beta1",
"pkg/apis/clientauthentication",
"pkg/apis/clientauthentication/v1alpha1",
"pkg/version",
"plugin/pkg/client/auth/azure",
"plugin/pkg/client/auth/exec",
"plugin/pkg/client/auth/gcp",
"plugin/pkg/client/auth/oidc",
"rest",
@@ -696,32 +729,21 @@
"util/homedir",
"util/integer",
"util/jsonpath",
"util/retry",
"util/workqueue"
]
revision = "78700dec6369ba22221b72770783300f143df150"
version = "v6.0.0"
[[projects]]
branch = "master"
name = "k8s.io/kube-openapi"
packages = [
"pkg/common",
"pkg/util/proto"
]
revision = "61b46af70dfed79c6d24530cd23b41440a7f22a5"
revision = "23781f4d6632d88e869066eaebb743857aa1ef9b"
version = "v7.0.0"
[[projects]]
name = "k8s.io/kubernetes"
packages = [
"pkg/printers",
"pkg/util/version"
]
revision = "925c127ec6b946659ad0fd596fa959be43f0cc05"
version = "v1.9.0"
packages = ["pkg/printers"]
revision = "fc32d2f3698e36b93322a3465f63a14e9f0eaead"
version = "v1.10.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "1ff4ae8599f679fbc275103407cfda3293b431888877531339c789749659b943"
inputs-digest = "a8e66580a3332bbe5ce086af0530dbab49bc5018f0d44b156e61bc404456a0ab"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml

@@ -25,9 +25,31 @@
non-go = true
go-tests = true
#
# Kubernetes packages
#
[[constraint]]
name = "cloud.google.com/go"
version = "0.11.0"
name = "k8s.io/kubernetes"
version = "~1.10"
[[constraint]]
name = "k8s.io/client-go"
version = "~7.0"
[[constraint]]
name = "k8s.io/apimachinery"
version = "kubernetes-1.10.0"
[[constraint]]
name = "k8s.io/api"
version = "kubernetes-1.10.0"
#
# Cloud provider packages
#
[[constraint]]
name = "github.com/aws/aws-sdk-go"
version = "1.13.12"
[[constraint]]
name = "github.com/Azure/azure-sdk-for-go"
@@ -37,34 +59,48 @@
name = "github.com/Azure/go-autorest"
version = "~8.1.x"
[[constraint]]
name = "github.com/aws/aws-sdk-go"
version = "1.13.12"
[[constraint]]
name = "cloud.google.com/go"
version = "0.11.0"
[[constraint]]
name = "google.golang.org/api"
branch = "master"
[[constraint]]
name = "golang.org/x/oauth2"
branch = "master"
#
# Third party packages
#
[[constraint]]
name = "github.com/golang/glog"
branch = "master"
[[constraint]]
name = "github.com/robfig/cron"
revision = "df38d32658d8788cd446ba74db4bb5375c4b0cb3"
[[constraint]]
name = "github.com/satori/go.uuid"
version = "1.1.0"
# TODO(1.0) this repo is a redirect to github.com/satori/go.uuid. Our
# current version of azure-sdk-for-go references this redirect, so
# use it so we don't get a duplicate copy of this dependency.
# Once our azure-sdk-for-go is updated to a newer version (where
# their dependency has changed to .../go.uuid), switch this to
# github.com/satori/go.uuid
[[constraint]]
name = "github.com/satori/uuid"
version = "1.1.0"
[[constraint]]
branch = "master"
name = "github.com/spf13/afero"
[[constraint]]
branch = "master"
[[constraint]]
name = "github.com/spf13/cobra"
branch = "master"
[[constraint]]
name = "github.com/spf13/pflag"
version = "1.0.0"
@@ -74,33 +110,5 @@
branch = "master"
[[constraint]]
branch = "master"
name = "golang.org/x/oauth2"
[[constraint]]
branch = "master"
name = "google.golang.org/api"
[[constraint]]
name = "k8s.io/kubernetes"
version = "~1.9"
[[constraint]]
name = "k8s.io/client-go"
version = "~6.0"
[[constraint]]
name = "k8s.io/apimachinery"
version = "kubernetes-1.9.0"
[[constraint]]
name = "k8s.io/api"
version = "kubernetes-1.9.0"
[[override]]
name = "github.com/russross/blackfriday"
revision = "93622da34e54fb6529bfb7c57e710f37a8d9cbd8"
[[constraint]]
branch = "master"
name = "github.com/hashicorp/go-plugin"
branch = "master"

Makefile

@@ -15,7 +15,7 @@
# limitations under the License.
# The binary to build (just the basename).
BIN := ark
BIN ?= ark
# This repo's root import path (under GOPATH).
PKG := github.com/heptio/ark
@@ -28,6 +28,8 @@ ARCH ?= linux-amd64
VERSION ?= master
TAG_LATEST ?= false
###
### These variables should not need tweaking.
###
@@ -44,7 +46,7 @@ GOARCH = $(word 2, $(platform_temp))
# TODO(ncdc): support multiple image architectures once gcr.io supports manifest lists
# Set default base image dynamically for each arch
ifeq ($(GOARCH),amd64)
DOCKERFILE ?= Dockerfile.alpine
DOCKERFILE ?= Dockerfile-$(BIN).alpine
endif
#ifeq ($(GOARCH),arm)
# DOCKERFILE ?= Dockerfile.arm #armel/busybox
@@ -58,7 +60,9 @@ IMAGE := $(REGISTRY)/$(BIN)
# If you want to build all binaries, see the 'all-build' rule.
# If you want to build all containers, see the 'all-container' rule.
# If you want to build AND push all containers, see the 'all-push' rule.
all: build
all:
@$(MAKE) build
@$(MAKE) build BIN=ark-restic-restore-helper
build-%:
@$(MAKE) --no-print-directory ARCH=$* build
@@ -94,38 +98,50 @@ BUILDER_IMAGE := ark-builder
# Example: make shell CMD="date > datefile"
shell: build-dirs build-image
@# the volume bind-mount of $PWD/vendor/k8s.io/api is needed for code-gen to
@# function correctly (ref. https://github.com/kubernetes/kubernetes/pull/64567)
@docker run \
-i $(TTY) \
--rm \
-u $$(id -u):$$(id -g) \
-v "$$(pwd)/.go/pkg:/go/pkg" \
-v "$$(pwd)/.go/std:/go/std" \
-v "$$(pwd):/go/src/$(PKG)" \
-v "$$(pwd)/_output/bin:/output" \
-v "$$(pwd)/.go/std/$(GOOS)/$(GOARCH):/usr/local/go/pkg/$(GOOS)_$(GOARCH)_static" \
-v "$$(pwd)/vendor/k8s.io/api:/go/src/k8s.io/api:delegated" \
-v "$$(pwd)/.go/pkg:/go/pkg:delegated" \
-v "$$(pwd)/.go/std:/go/std:delegated" \
-v "$$(pwd):/go/src/$(PKG):delegated" \
-v "$$(pwd)/_output/bin:/output:delegated" \
-v "$$(pwd)/.go/std/$(GOOS)/$(GOARCH):/usr/local/go/pkg/$(GOOS)_$(GOARCH)_static:delegated" \
-v "$$(pwd)/.go/go-build:/.cache/go-build:delegated" \
-w /go/src/$(PKG) \
$(BUILDER_IMAGE) \
/bin/sh $(CMD)
DOTFILE_IMAGE = $(subst :,_,$(subst /,_,$(IMAGE))-$(VERSION))
all-containers:
$(MAKE) container
$(MAKE) container BIN=ark-restic-restore-helper
container: verify test .container-$(DOTFILE_IMAGE) container-name
.container-$(DOTFILE_IMAGE): _output/bin/$(GOOS)/$(GOARCH)/$(BIN) $(DOCKERFILE)
@cp $(DOCKERFILE) _output/.dockerfile-$(GOOS)-$(GOARCH)
@docker build -t $(IMAGE):$(VERSION) -f _output/.dockerfile-$(GOOS)-$(GOARCH) _output
@cp $(DOCKERFILE) _output/.dockerfile-$(BIN)-$(GOOS)-$(GOARCH)
@docker build -t $(IMAGE):$(VERSION) -f _output/.dockerfile-$(BIN)-$(GOOS)-$(GOARCH) _output
@docker images -q $(IMAGE):$(VERSION) > $@
container-name:
@echo "container: $(IMAGE):$(VERSION)"
all-push:
$(MAKE) push
$(MAKE) push BIN=ark-restic-restore-helper
push: .push-$(DOTFILE_IMAGE) push-name
.push-$(DOTFILE_IMAGE): .container-$(DOTFILE_IMAGE)
@docker push $(IMAGE):$(VERSION)
@if git describe --tags --exact-match >/dev/null 2>&1; \
then \
docker tag $(IMAGE):$(VERSION) $(IMAGE):latest; \
docker push $(IMAGE):latest; \
fi
ifeq ($(TAG_LATEST), true)
docker tag $(IMAGE):$(VERSION) $(IMAGE):latest
docker push $(IMAGE):latest
endif
@docker images -q $(IMAGE):$(VERSION) > $@
push-name:
@@ -171,7 +187,7 @@ tar-bin: build
build-dirs:
@mkdir -p _output/bin/$(GOOS)/$(GOARCH)
@mkdir -p .go/src/$(PKG) .go/pkg .go/bin .go/std/$(GOOS)/$(GOARCH)
@mkdir -p .go/src/$(PKG) .go/pkg .go/bin .go/std/$(GOOS)/$(GOARCH) .go/go-build
build-image:
cd hack/build-image && docker build -t $(BUILDER_IMAGE) .
@@ -181,4 +197,4 @@ clean:
rm -rf .go _output
docker rmi $(BUILDER_IMAGE)
ci: build verify test
ci: all verify test

README.md

@@ -19,153 +19,11 @@ Ark consists of:
## More information
[The documentation][29] provides detailed information about building from source, architecture, extending Ark, and more.
## Getting started
The following example sets up the Ark server and client, then backs up and restores a sample application.
For simplicity, the example uses Minio, an S3-compatible storage service that runs locally on your cluster. See [Set up Ark with your cloud provider][3] for how to run on a cloud provider.
### Prerequisites
* Access to a Kubernetes cluster, version 1.7 or later. Version 1.7.5 or later is required to run `ark backup delete`.
* A DNS server on the cluster
* `kubectl` installed
### Download
Clone or fork the Ark repository:
```
git clone git@github.com:heptio/ark.git
```
NOTE: Make sure to check out the appropriate version. We recommend that you check out the latest tagged version. The master branch is under active development and might not be stable.
### Set up server
1. Start the server and the local storage service. In the root directory of Ark, run:
```bash
kubectl apply -f examples/common/00-prereqs.yaml
kubectl apply -f examples/minio/
```
NOTE: If you get an error about Config creation, wait for a minute, then run the commands again.
1. Deploy the example nginx application:
```bash
kubectl apply -f examples/nginx-app/base.yaml
```
1. Check to see that both the Ark and nginx deployments are successfully created:
```
kubectl get deployments -l component=ark --namespace=heptio-ark
kubectl get deployments --namespace=nginx-example
```
### Install client
For this example, we recommend that you [download a pre-built release][26].
You can also [build from source][7].
Make sure that you install it somewhere in your `$PATH`.
### Back up
1. Create a backup for any object that matches the `app=nginx` label selector:
```
ark backup create nginx-backup --selector app=nginx
```
1. Simulate a disaster:
```
kubectl delete namespace nginx-example
```
1. To check that the nginx deployment and service are gone, run:
```
kubectl get deployments --namespace=nginx-example
kubectl get services --namespace=nginx-example
kubectl get namespace/nginx-example
```
You should get no results.
NOTE: You might need to wait for a few minutes for the namespace to be fully cleaned up.
### Restore
1. Run:
```
ark restore create --from-backup nginx-backup
```
1. Run:
```
ark restore get
```
After the restore finishes, the output looks like the following:
```
NAME BACKUP STATUS WARNINGS ERRORS CREATED SELECTOR
nginx-backup-20170727200524 nginx-backup Completed 0 0 2017-07-27 20:05:24 +0000 UTC <none>
```
NOTE: The restore can take a few moments to finish. During this time, the `STATUS` column reads `InProgress`.
After a successful restore, the `STATUS` column is `Completed`, and `WARNINGS` and `ERRORS` are 0. All objects in the `nginx-example` namespace should be just as they were before you deleted them.
If there are errors or warnings, you can look at them in detail:
```
ark restore describe <RESTORE_NAME>
```
For more information, see [the debugging information][18].
### Clean up
If you want to delete any backups you created, including data in object storage and persistent
volume snapshots, you can run:
```
ark backup delete BACKUP_NAME
```
This asks the Ark server to delete all backup data associated with `BACKUP_NAME`. You need to do
this for each backup you want to permanently delete. A future version of Ark will allow you to
delete multiple backups by name or label selector.
Once fully removed, the backup is no longer visible when you run:
```
ark backup get BACKUP_NAME
```
If you want to uninstall Ark but preserve the backup data in object storage and persistent volume
snapshots, it is safe to remove the `heptio-ark` namespace and everything else created for this
example:
```
kubectl delete -f examples/common/
kubectl delete -f examples/minio/
kubectl delete -f examples/nginx-app/base.yaml
```
[The documentation][29] provides a getting started guide, plus information about building from source, architecture, extending Ark, and more.
## Troubleshooting
If you encounter issues, review the [troubleshooting docs][30], [file an issue][4], or talk to us on the [Kubernetes Slack team][25] channel `#ark-dr`.
If you encounter issues, review the [troubleshooting docs][30], [file an issue][4], or talk to us on the [#ark-dr channel][25] on the Kubernetes Slack server.
## Contributing
@@ -173,12 +31,12 @@ Thanks for taking the time to join our community and start contributing!
Feedback and discussion are available on [the mailing list][24].
#### Before you start
### Before you start
* Please familiarize yourself with the [Code of Conduct][8] before contributing.
* See [CONTRIBUTING.md][5] for instructions on the developer certificate of origin that we require.
#### Pull requests
### Pull requests
* We welcome pull requests. Feel free to dig through the [issues][4] and jump in.
@@ -189,30 +47,22 @@ See [the list of releases][6] to find out about feature changes.
[0]: https://github.com/heptio
[1]: https://travis-ci.org/heptio/ark.svg?branch=master
[2]: https://travis-ci.org/heptio/ark
[3]: /docs/cloud-common.md
[4]: https://github.com/heptio/ark/issues
[5]: https://github.com/heptio/ark/blob/master/CONTRIBUTING.md
[6]: https://github.com/heptio/ark/releases
[7]: /docs/build-from-scratch.md
[8]: https://github.com/heptio/ark/blob/master/CODE_OF_CONDUCT.md
[9]: https://kubernetes.io/docs/setup/
[10]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-with-homebrew-on-macos
[11]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#tabset-1
[12]: https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/README.md
[13]: /docs/output-file-format.md
[14]: https://github.com/kubernetes/kubernetes
[15]: https://aws.amazon.com/
[16]: https://cloud.google.com/
[17]: https://azure.microsoft.com/
[18]: /docs/debugging-restores.md
[19]: /docs/img/backup-process.png
[20]: https://kubernetes.io/docs/concepts/api-extension/custom-resources/#customresourcedefinitions
[21]: https://kubernetes.io/docs/concepts/api-extension/custom-resources/#custom-controllers
[22]: https://github.com/coreos/etcd
[24]: http://j.hept.io/ark-list
[25]: http://slack.kubernetes.io/
[26]: https://github.com/heptio/ark/releases
[27]: /docs/hooks.md
[28]: /docs/plugins.md
[25]: https://kubernetes.slack.com/messages/ark-dr
[29]: https://heptio.github.io/ark/
[30]: /docs/troubleshooting.md


@@ -0,0 +1,77 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"time"
)

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "ERROR: exactly one argument must be provided, the restore's UID")
		os.Exit(1)
	}

	// Poll once per second until a done file is found in every restored volume.
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()

	for range ticker.C {
		if done() {
			fmt.Println("All restic restores are done")
			return
		}
	}
}

// done returns true if, for each directory under /restores, a file exists
// within the .ark/ subdirectory whose name is equal to os.Args[1]; it
// returns false otherwise.
func done() bool {
	children, err := ioutil.ReadDir("/restores")
	if err != nil {
		fmt.Fprintf(os.Stderr, "ERROR reading /restores directory: %s\n", err)
		return false
	}

	for _, child := range children {
		if !child.IsDir() {
			fmt.Printf("%s is not a directory, skipping.\n", child.Name())
			continue
		}

		doneFile := filepath.Join("/restores", child.Name(), ".ark", os.Args[1])

		if _, err := os.Stat(doneFile); os.IsNotExist(err) {
			fmt.Printf("Not found: %s\n", doneFile)
			return false
		} else if err != nil {
			fmt.Fprintf(os.Stderr, "ERROR looking for %s: %s\n", doneFile, err)
			return false
		}

		fmt.Printf("Found %s\n", doneFile)
	}

	return true
}


@@ -1,22 +1,12 @@
# About Heptio Ark
# How Ark Works
Heptio Ark provides customizable degrees of recovery for all Kubernetes objects (Pods, Deployments, Jobs, Custom Resource Definitions, etc.), as well as for persistent volumes. This recovery can be cluster-wide, or fine-tuned according to object type, namespace, or labels.
Each Ark operation -- on-demand backup, scheduled backup, restore -- is a custom resource, defined with a Kubernetes [Custom Resource Definition (CRD)][20] and stored in [etcd][22]. The config custom resource specifies core information and options such as cloud provider settings. Ark also includes controllers that process the custom resources to perform backups, restores, and all related operations.
You can back up or restore all objects in your cluster, or you can filter objects by type, namespace, and/or label.
Ark is ideal for the disaster recovery use case, as well as for snapshotting your application state prior to performing system operations on your cluster (e.g. upgrades).
## Features
Ark provides the following operations:
* On-demand backups
* Scheduled backups
* Restores
Each operation is a custom resource, defined with a Kubernetes [Custom Resource Definition (CRD)][20] and stored in [etcd][22]. An additional custom resource, Config, specifies required information and customized options, such as cloud provider settings. These resources are handled by [custom controllers][21] when their corresponding requests are submitted to the Kubernetes API server.
Each controller watches its custom resource for API requests (Ark operations), performs validations, and handles the logic for interacting with the cloud provider API -- for example, managing object storage and persistent volumes.
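Because each operation is an ordinary namespaced Kubernetes resource, you can inspect pending and completed operations directly with kubectl; for example:

```bash
kubectl -n heptio-ark get backups,schedules,restores
```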
### On-demand backups
## On-demand backups
The **backup** operation:
@@ -29,25 +19,23 @@ need to tell a database to flush its in-memory buffers to disk before taking a s
Note that cluster backups are not strictly atomic. If Kubernetes objects are being created or edited at the time of backup, they might not be included in the backup. The odds of capturing inconsistent information are low, but it is possible.
### Scheduled backups
## Scheduled backups
The **schedule** operation allows you to back up your data at recurring intervals. The first backup is performed when the schedule is first created, and subsequent backups happen at the schedule's specified interval. These intervals are specified by a Cron expression.
A Schedule acts as a wrapper for Backups; when triggered, it creates them behind the scenes.
Scheduled backups are saved with the name `<SCHEDULE NAME>-<TIMESTAMP>`, where `<TIMESTAMP>` is formatted as *YYYYMMDDhhmmss*.
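For example, a sketch of a daily schedule (assuming the `--schedule` flag's cron syntax):

```bash
# Creates backups named daily-nginx-<TIMESTAMP>, one per day at 01:00
ark schedule create daily-nginx --schedule "0 1 * * *"
```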
### Restores
## Restores
The **restore** operation allows you to restore all of the objects and persistent volumes from a previously created Backup. Heptio Ark supports multiple namespace remapping--for example, in a single restore, objects in namespace "abc" can be recreated under namespace "def", and the ones in "123" under "456".
The **restore** operation allows you to restore all of the objects and persistent volumes from a previously created backup. You can also restore only a filtered subset of objects and persistent volumes. Ark supports multiple namespace remapping--for example, in a single restore, objects in namespace "abc" can be recreated under namespace "def", and the objects in namespace "123" under "456".
Kubernetes objects that have been restored can be identified with a label that looks like `ark-restore=<BACKUP NAME>-<TIMESTAMP>`, where `<TIMESTAMP>` is formatted as *YYYYMMDDhhmmss*.
The default name of a restore is `<BACKUP NAME>-<TIMESTAMP>`, where `<TIMESTAMP>` is formatted as *YYYYMMDDhhmmss*. You can also specify a custom name. A restored object also includes a label with key `ark-restore` and value `<RESTORE NAME>`.
You can also run the Ark server in restore-only mode, which disables backup, schedule, and garbage collection functionality during disaster recovery.
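As a sketch, a custom-named restore with a namespace remapping might look like this (flag usage assumed from the CLI reference; names are illustrative):

```bash
ark restore create restore-abc-to-def --from-backup my-backup --namespace-mappings abc:def
```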
## Backup workflow
Here's what happens when you run `ark backup create test-backup`:
When you run `ark backup create test-backup`:
1. The Ark client makes a call to the Kubernetes API server to create a `Backup` object.
@@ -57,24 +45,27 @@ Here's what happens when you run `ark backup create test-backup`:
1. The `BackupController` makes a call to the object storage service -- for example, AWS S3 -- to upload the backup file.
By default `ark backup create` makes disk snapshots of any persistent volumes. You can adjust the snapshots by specifying additional flags. See [the CLI help][30] for more information. Snapshots can be disabled with the option `--snapshot-volumes=false`.
By default, `ark backup create` makes disk snapshots of any persistent volumes. You can adjust the snapshots by specifying additional flags. See [the CLI help][30] for more information. Snapshots can be disabled with the option `--snapshot-volumes=false`.
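For example:

```bash
# Back up Kubernetes objects only, skipping persistent volume snapshots
ark backup create test-backup --snapshot-volumes=false
```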
![19]
## Set a backup to expire
When you create a backup, you can specify a TTL by adding the flag `--ttl <DURATION>`. If Ark sees that an existing Backup resource is expired, it removes:
When you create a backup, you can specify a TTL by adding the flag `--ttl <DURATION>`. If Ark sees that an existing backup resource is expired, it removes:
* The Backup resource
* The backup resource
* The backup file from cloud object storage
* All PersistentVolume snapshots
* All associated Restores
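For example, a backup that Ark treats as expired after one day:

```bash
ark backup create nginx-backup --ttl 24h0m0s
```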
## Object storage sync
Heptio Ark treats object storage as the source of truth. It continuously checks to see that the correct Backup resources are always present. If there is a properly formatted backup file in the storage bucket, but no corresponding Backup resources in the Kubernetes API, Ark synchronizes the information from object storage to Kubernetes.
Heptio Ark treats object storage as the source of truth. It continuously checks to see that the correct backup resources are always present. If there is a properly formatted backup file in the storage bucket, but no corresponding backup resource in the Kubernetes API, Ark synchronizes the information from object storage to Kubernetes.
This allows restore functionality to work in a cluster migration scenario, where the original Backup objects do not exist in the new cluster. See the tutorials for details.
This allows restore functionality to work in a cluster migration scenario, where the original backup objects do not exist in the new cluster.
[19]: /img/backup-process.png
[20]: https://kubernetes.io/docs/concepts/api-extension/custom-resources/#customresourcedefinitions
[21]: https://kubernetes.io/docs/concepts/api-extension/custom-resources/#custom-controllers
[22]: https://github.com/coreos/etcd
[30]: https://github.com/heptio/ark/blob/master/docs/cli-reference/ark_create_backup.md


@@ -139,7 +139,7 @@ Specify the following values in the example files:
* In `examples/aws/00-ark-config.yaml`:
* Replace `<YOUR_BUCKET>` and `<YOUR_REGION>`. See the [Config definition][6] for details.
* Replace `<YOUR_BUCKET>` and `<YOUR_REGION>` (for S3, region is optional and will be queried from the AWS S3 API if not provided). See the [Config definition][6] for details.
* (Optional) If you run the nginx example, in file `examples/nginx-app/with-pv.yaml`:
@@ -245,7 +245,7 @@ It can be set up for Ark by creating a role that will have required permissions,
--policy-name heptio-ark-policy \
--policy-document file://./heptio-ark-policy.json
```
4. Update AWS_ACCOUNT_ID & HEPTIO_ARK_ROLE_NAME in the file `examples/common/10-deployment-kube2iam.yaml`:
4. Update AWS_ACCOUNT_ID & HEPTIO_ARK_ROLE_NAME in the file `examples/aws/10-deployment-kube2iam.yaml`:
```
---


@@ -28,10 +28,11 @@ The storage account can be created in the same Resource Group as your Kubernetes
separated into its own Resource Group. The example below shows the storage account created in a
separate `Ark_Backups` Resource Group.
The storage account needs to be created with a globally unique id since this is used for dns. The
random function ensures you don't have to come up with a unique name. The storage account is
created with encryption at rest capabilities (Microsoft managed keys) and is configured to only
allow access via https.
The storage account needs to be created with a globally unique ID, since this is used for DNS. In
the sample script below, we're generating a random name using `uuidgen`, but you can come up with
this name however you'd like, following the [Azure naming rules for storage accounts][19]. The
storage account is created with encryption at rest capabilities (Microsoft managed keys) and is
configured to only allow access via HTTPS.
```bash
# Create a resource group for the backups storage account. Change the location as needed.
@@ -39,7 +40,7 @@ AZURE_BACKUP_RESOURCE_GROUP=Ark_Backups
az group create -n $AZURE_BACKUP_RESOURCE_GROUP --location WestUS
# Create the storage account
AZURE_STORAGE_ACCOUNT_ID="ark`cat /proc/sys/kernel/random/uuid | cut -d '-' -f5`"
AZURE_STORAGE_ACCOUNT_ID="ark$(uuidgen | cut -d '-' -f5 | tr '[A-Z]' '[a-z]')"
az storage account create \
--name $AZURE_STORAGE_ACCOUNT_ID \
--resource-group $AZURE_BACKUP_RESOURCE_GROUP \
@@ -57,7 +58,7 @@ az storage container create -n ark --public-access off --account-name $AZURE_STO
AZURE_STORAGE_KEY=`az storage account keys list \
--account-name $AZURE_STORAGE_ACCOUNT_ID \
--resource-group $AZURE_BACKUP_RESOURCE_GROUP \
--query [0].value \
--query '[0].value' \
-o tsv`
```
@@ -164,3 +165,4 @@ In the root of your Ark directory, run:
[8]: config-definition.md#azure
[17]: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects
[18]: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli
[19]: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#storage


@@ -38,6 +38,7 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.
* [ark describe](ark_describe.md) - Describe ark resources
* [ark get](ark_get.md) - Get ark resources
* [ark plugin](ark_plugin.md) - Work with plugins
* [ark restic](ark_restic.md) - Work with restic
* [ark restore](ark_restore.md) - Work with restores
* [ark schedule](ark_schedule.md) - Work with schedules
* [ark server](ark_server.md) - Run the ark server


@@ -16,6 +16,7 @@ ark backup describe [NAME1] [NAME2] [NAME...] [flags]
```
-h, --help help for describe
-l, --selector string only show items matching this label selector
--volume-details display details of restic volume backups
```
### Options inherited from parent commands


@@ -16,6 +16,7 @@ ark describe backups [NAME1] [NAME2] [NAME...] [flags]
```
-h, --help help for backups
-l, --selector string only show items matching this label selector
--volume-details display details of restic volume backups
```
### Options inherited from parent commands


@@ -16,6 +16,7 @@ ark describe restores [NAME1] [NAME2] [NAME...] [flags]
```
-h, --help help for restores
-l, --selector string only show items matching this label selector
--volume-details display details of restic volume restores
```
### Options inherited from parent commands


@@ -0,0 +1,35 @@
## ark restic
Work with restic
### Synopsis
Work with restic
### Options
```
-h, --help help for restic
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--kubecontext string The context to use to talk to the Kubernetes apiserver. If unset defaults to whatever your current-context is (kubectl config current-context)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
-n, --namespace string The namespace in which Ark should operate (default "heptio-ark")
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
* [ark restic repo](ark_restic_repo.md) - Work with restic repositories
* [ark restic server](ark_restic_server.md) - Run the ark restic server


@@ -0,0 +1,34 @@
## ark restic repo
Work with restic repositories
### Synopsis
Work with restic repositories
### Options
```
-h, --help help for repo
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--kubecontext string The context to use to talk to the Kubernetes apiserver. If unset defaults to whatever your current-context is (kubectl config current-context)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
-n, --namespace string The namespace in which Ark should operate (default "heptio-ark")
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [ark restic](ark_restic.md) - Work with restic
* [ark restic repo get](ark_restic_repo_get.md) - Get restic repositories


@@ -0,0 +1,41 @@
## ark restic repo get
Get restic repositories
### Synopsis
Get restic repositories
```
ark restic repo get [flags]
```
### Options
```
-h, --help help for get
--label-columns stringArray a comma-separated list of labels to be displayed as columns
-o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'. (default "table")
-l, --selector string only show items matching this label selector
--show-labels show labels in the last column
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--kubecontext string The context to use to talk to the Kubernetes apiserver. If unset defaults to whatever your current-context is (kubectl config current-context)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
-n, --namespace string The namespace in which Ark should operate (default "heptio-ark")
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [ark restic repo](ark_restic_repo.md) - Work with restic repositories


@@ -0,0 +1,38 @@
## ark restic server
Run the ark restic server
### Synopsis
Run the ark restic server
```
ark restic server [flags]
```
### Options
```
-h, --help help for server
--log-level the level at which to log. Valid values are debug, info, warning, error, fatal, panic. (default info)
```
### Options inherited from parent commands
```
--alsologtostderr log to standard error as well as files
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
--kubecontext string The context to use to talk to the Kubernetes apiserver. If unset defaults to whatever your current-context is (kubectl config current-context)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--logtostderr log to standard error instead of files
-n, --namespace string The namespace in which Ark should operate (default "heptio-ark")
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
-v, --v Level log level for V logs
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
### SEE ALSO
* [ark restic](ark_restic.md) - Work with restic


@@ -16,6 +16,7 @@ ark restore describe [NAME1] [NAME2] [NAME...] [flags]
```
-h, --help help for describe
-l, --selector string only show items matching this label selector
--volume-details display details of restic volume restores
```
### Options inherited from parent commands


@@ -14,9 +14,10 @@ ark server [flags]
### Options
```
-h, --help help for server
--log-level the level at which to log. Valid values are debug, info, warning, error, fatal, panic. (default info)
--plugin-dir string directory containing Ark plugins (default "/plugins")
-h, --help help for server
--log-level the level at which to log. Valid values are debug, info, warning, error, fatal, panic. (default info)
--metrics-address string the address to expose prometheus metrics (default ":8085")
--plugin-dir string directory containing Ark plugins (default "/plugins")
```
### Options inherited from parent commands


@@ -9,6 +9,8 @@ The Ark repository includes a set of example YAML files that specify the setting
* [Run Ark on Azure][2]
* [Use IBM Cloud Object Store as Ark's storage destination][4]
In version 0.9.0 and later, you can use Ark's integration with restic, which requires additional setup. See [Restic instructions][20].
## Examples
After you set up the Ark server, try these examples:
@@ -77,3 +79,4 @@ After you set up the Ark server, try these examples:
[3]: namespace.md
[4]: ibm-config.md
[19]: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming
[20]: https://github.com/heptio/ark/blob/master/docs/restic.md


@@ -56,7 +56,7 @@ The configurable parameters are as follows:
| `backupSyncPeriod` | metav1.Duration | 60m0s | How frequently Ark queries the object storage to make sure that the appropriate Backup resources have been created for existing backup files. |
| `gcSyncPeriod` | metav1.Duration | 60m0s | How frequently Ark queries the object storage to delete backup files that have passed their TTL. |
| `scheduleSyncPeriod` | metav1.Duration | 1m0s | How frequently Ark checks its Schedule resource objects to see if a backup needs to be initiated. |
| `resourcePriorities` | []string | `[namespaces, persistentvolumes, persistentvolumeclaims, secrets, configmaps]` | An ordered list that describes the order in which Kubernetes resource objects should be restored (also specified with the `<RESOURCE>.<GROUP>` format.<br><br>If a resource is not in this list, it is restored after all other prioritized resources. |
| `resourcePriorities` | []string | `[namespaces, persistentvolumes, persistentvolumeclaims, secrets, configmaps, serviceaccounts, limitranges]` | An ordered list that describes the order in which Kubernetes resource objects should be restored (also specified with the `<RESOURCE>.<GROUP>` format.<br><br>If a resource is not in this list, it is restored after all other prioritized resources. |
| `restoreOnlyMode` | bool | `false` | When RestoreOnly mode is on, functionality for backups, schedules, and expired backup deletion is *turned off*. Restores are made from existing backup files in object storage. |
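These settings live on the Config resource itself, so one way to inspect or adjust them on a running server (the resource name `default` is an assumption based on the example manifests):

```bash
kubectl -n heptio-ark get config default -o yaml
kubectl -n heptio-ark edit config default   # e.g. set restoreOnlyMode: true
```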
### AWS
@@ -67,7 +67,7 @@ The configurable parameters are as follows:
| Key | Type | Default | Meaning |
| --- | --- | --- | --- |
| `region` | string | Required Field | *Example*: "us-east-1"<br><br>See [AWS documentation][3] for the full list. |
| `region` | string | Empty | *Example*: "us-east-1"<br><br>See [AWS documentation][3] for the full list.<br><br>Queried from the AWS S3 API if not provided. |
| `s3ForcePathStyle` | bool | `false` | Set this to `true` if you are using a local storage service like Minio. |
| `s3Url` | string | Required field for non-AWS-hosted storage| *Example*: http://minio:9000<br><br>You can specify the AWS S3 URL here for explicitness, but Ark can already generate it from `region`, and `bucket`. This field is primarily for local storage services like Minio.|
| `kmsKeyId` | string | Empty | *Example*: "502b409c-4da1-419f-a16e-eif453b3i49f" or "alias/`<KMS-Key-Alias-Name>`"<br><br>Specify an [AWS KMS key][10] id or alias to enable encryption of the backups stored in S3. Only works with AWS S3 and may require explicitly granting key usage rights.|

docs/debugging-install.md

@@ -0,0 +1,59 @@
# Debugging Installation Issues
## General
### `invalid configuration: no configuration has been provided`
This typically means that no `kubeconfig` file can be found for the Ark client to use. Ark looks for a kubeconfig in the
following locations:
* the path specified by the `--kubeconfig` flag, if any
* the path specified by the `$KUBECONFIG` environment variable, if any
* `~/.kube/config`
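For example, to point the client at an explicit kubeconfig (the flag takes precedence over the other locations):

```bash
ark backup get --kubeconfig /path/to/kubeconfig
```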
### Backups or restores stuck in `New` phase
This means that the Ark controllers are not processing the backups/restores, which usually happens because the Ark server is not running. Check the pod description and logs for errors:
```
kubectl -n heptio-ark describe pods
kubectl -n heptio-ark logs deployment/ark
```
## AWS
### `NoCredentialProviders: no valid providers in chain`
This means that the secret containing the AWS IAM user credentials for Ark has not been created/mounted properly
into the Ark server pod. Ensure the following:
* The `cloud-credentials` secret exists in the Ark server's namespace
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-ark` file
* The `credentials-ark` file is formatted properly and has the correct values:
```
[default]
aws_access_key_id=<your AWS access key ID>
aws_secret_access_key=<your AWS secret access key>
```
* The `cloud-credentials` secret is defined as a volume for the Ark deployment
* The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials`
## Azure
### `Failed to refresh the Token` or `adal: Refresh request failed`
This means that the secret containing the Azure service principal credentials for Ark has not been created/mounted
properly into the Ark server pod. Ensure the following:
* The `cloud-credentials` secret exists in the Ark server's namespace
* The `cloud-credentials` secret has seven keys and each one has the correct value (see [setup instructions][0])
* The `cloud-credentials` secret is defined as a volume for the Ark deployment
* The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials`
## GCE/GKE
### `open credentials/cloud: no such file or directory`
This means that the secret containing the GCE service account credentials for Ark has not been created/mounted properly
into the Ark server pod. Ensure the following:
* The `cloud-credentials` secret exists in the Ark server's namespace
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-ark` file
* The `cloud-credentials` secret is defined as a volume for the Ark deployment
* The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials`
[0]: azure-config.md#credentials-and-configuration


@@ -3,7 +3,7 @@
## When is it appropriate to use Ark instead of etcd's built in backup/restore?
Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more
example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
generally a better approach. It gives you the ability to throw away an unstable cluster and restore
your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up


@@ -3,7 +3,7 @@ You can deploy Ark on IBM [Public][5] or [Private][4] clouds, or even on any oth
To set up IBM Cloud Object Storage (COS) as Ark's destination, you:
* Create your COS instance
* Create your COS instance
* Create an S3 bucket
* Define a service that can store data in the bucket
* Configure and start the Ark server
@@ -15,8 +15,8 @@ If you don't have a COS instance, you can create a new one, according to the d
## Create an S3 bucket
Heptio Ark requires an object storage bucket to store backups in. See instructions in [Create some buckets to store your data][2].
## Define a service that can store data in the bucket.
The process of creating service credentials is described in [Service credentials][3].
## Define a service that can store data in the bucket.
The process of creating service credentials is described in [Service credentials][3].
A few notes:
1. The Ark service will write its backup into the bucket, so it requires the “Writer” access role.

docs/image-tagging.md

@@ -0,0 +1,21 @@
# Image tagging policy
This document describes Ark's image tagging policy.
## Released versions
`gcr.io/heptio-images/ark:<SemVer>`
Ark follows the [Semantic Versioning](http://semver.org/) standard for releases. Each tag in the `github.com/heptio/ark` repository has a matching image, e.g. `gcr.io/heptio-images/ark:v0.8.0`.
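For example, to pin a specific released image rather than a moving tag:

```bash
docker pull gcr.io/heptio-images/ark:v0.8.0
```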
### Latest
`gcr.io/heptio-images/ark:latest`
The `latest` tag follows the most recently released version of Ark.
## Development
`gcr.io/heptio-images/ark:master`
The `master` tag follows the latest commit to land on the `master` branch.

docs/quickstart.md

@@ -0,0 +1,149 @@
## Getting started
The following example sets up the Ark server and client, then backs up and restores a sample application.
For simplicity, the example uses Minio, an S3-compatible storage service that runs locally on your cluster. See [Set up Ark with your cloud provider][3] for how to run on a cloud provider.
### Prerequisites
* Access to a Kubernetes cluster, version 1.7 or later. Version 1.7.5 or later is required to run `ark backup delete`.
* A DNS server on the cluster
* `kubectl` installed
### Download
Clone or fork the Ark repository:
```
git clone git@github.com:heptio/ark.git
```
NOTE: Make sure to check out the appropriate version. We recommend that you check out the latest tagged version. The master branch is under active development and might not be stable.
### Set up server
1. Start the server and the local storage service. In the root directory of Ark, run:
```bash
kubectl apply -f examples/common/00-prereqs.yaml
kubectl apply -f examples/minio/
```
NOTE: If you get an error about Config creation, wait for a minute, then run the commands again.
1. Deploy the example nginx application:
```bash
kubectl apply -f examples/nginx-app/base.yaml
```
1. Check to see that both the Ark and nginx deployments are successfully created:
```
kubectl get deployments -l component=ark --namespace=heptio-ark
kubectl get deployments --namespace=nginx-example
```
### Install client
[Download the client][26].
Make sure that you install it somewhere in your PATH.
### Back up
1. Create a backup for any object that matches the `app=nginx` label selector:
```
ark backup create nginx-backup --selector app=nginx
```
Alternatively, if you want to back up all objects *except* those matching the label `backup=ignore`:
```
ark backup create nginx-backup --selector 'backup notin (ignore)'
```
1. Simulate a disaster:
```
kubectl delete namespace nginx-example
```
1. To check that the nginx deployment and service are gone, run:
```
kubectl get deployments --namespace=nginx-example
kubectl get services --namespace=nginx-example
kubectl get namespace/nginx-example
```
You should get no results.
NOTE: You might need to wait for a few minutes for the namespace to be fully cleaned up.
### Restore
1. Run:
```
ark restore create --from-backup nginx-backup
```
1. Run:
```
ark restore get
```
After the restore finishes, the output looks like the following:
```
NAME BACKUP STATUS WARNINGS ERRORS CREATED SELECTOR
nginx-backup-20170727200524 nginx-backup Completed 0 0 2017-07-27 20:05:24 +0000 UTC <none>
```
NOTE: The restore can take a few moments to finish. During this time, the `STATUS` column reads `InProgress`.
After a successful restore, the `STATUS` column is `Completed`, and `WARNINGS` and `ERRORS` are 0. All objects in the `nginx-example` namespace should be just as they were before you deleted them.
If there are errors or warnings, you can look at them in detail:
```
ark restore describe <RESTORE_NAME>
```
For more information, see [the debugging information][18].
### Clean up
If you want to delete any backups you created, including data in object storage and persistent
volume snapshots, you can run:
```
ark backup delete BACKUP_NAME
```
This asks the Ark server to delete all backup data associated with `BACKUP_NAME`. You need to do
this for each backup you want to permanently delete. A future version of Ark will allow you to
delete multiple backups by name or label selector.
Once fully removed, the backup is no longer visible when you run:
```
ark backup get BACKUP_NAME
```
If you want to uninstall Ark but preserve the backup data in object storage and persistent volume
snapshots, it is safe to remove the `heptio-ark` namespace and everything else created for this
example:
```
kubectl delete -f examples/common/
kubectl delete -f examples/minio/
kubectl delete -f examples/nginx-app/base.yaml
```
[3]: /docs/cloud-common.md
[18]: /docs/debugging-restores.md
[26]: https://github.com/heptio/ark/releases

docs/restic.md

@@ -0,0 +1,267 @@
# Restic Integration
As of version 0.9.0, Ark has support for backing up and restoring Kubernetes volumes using a free open-source backup tool called
[restic][1].
Ark has always allowed you to take snapshots of persistent volumes as part of your backups if you're using one of
the supported cloud providers' block storage offerings (Amazon EBS Volumes, Azure Managed Disks, Google Persistent Disks).
Starting with version 0.6.0, we provide a plugin model that enables anyone to implement additional object and block storage
backends, outside the main Ark repository.
We integrated restic with Ark so that users have an out-of-the-box solution for backing up and restoring almost any type of Kubernetes
volume*. This is a new capability for Ark, not a replacement for existing functionality. If you're running on AWS, and
taking EBS snapshots as part of your regular Ark backups, there's no need to switch to using restic. However, if you've
been waiting for a snapshot plugin for your storage platform, or if you're using EFS, AzureFile, NFS, emptyDir,
local, or any other volume type that doesn't have a native snapshot concept, restic might be for you.
Restic is not tied to a specific storage platform, which means that this integration also paves the way for future work to enable
cross-volume-type data migrations. Stay tuned as this evolves!
\* hostPath volumes are not supported, but the [new local volume type][4] is supported.
## Setup
### Prerequisites
- A working install of Ark version 0.9.0 or later. See [Set up Ark][2]
- A local clone of [the latest release tag of the Ark repository][3]
#### Additional steps if upgrading from version 0.9 alpha
- Manually delete all of the repositories/data from your existing restic bucket
- Delete all Ark backups from your cluster using `ark backup delete`
- Delete all secrets named `ark-restic-credentials` across all namespaces in your cluster
### Instructions
1. Download an updated Ark client from the [latest release][3], and move it to a location in your PATH.
1. From the Ark root directory, run the following to create new custom resource definitions:
```bash
kubectl apply -f examples/common/00-prereqs.yaml
```
1. Run one of the following for your platform to create the daemonset:
- AWS: `kubectl apply -f examples/aws/20-restic-daemonset.yaml`
- Azure: `kubectl apply -f examples/azure/20-restic-daemonset.yaml`
- GCP: `kubectl apply -f examples/gcp/20-restic-daemonset.yaml`
- Minio: `kubectl apply -f examples/minio/30-restic-daemonset.yaml`
1. Create a new bucket for restic to store its data in, and give the `heptio-ark` IAM user access to it, similarly to
the main Ark bucket you've already set up. Note that this must be a different bucket than the main Ark bucket.
We plan to remove this limitation in a future release.
1. Uncomment `resticLocation` in your Ark config and set the value appropriately, then apply:
- AWS: `kubectl apply -f examples/aws/00-ark-config.yaml`
- Azure: `kubectl apply -f examples/azure/10-ark-config.yaml`
- GCP: `kubectl apply -f examples/gcp/00-ark-config.yaml`
- Minio: `kubectl apply -f examples/minio/10-ark-config.yaml`
Note that `resticLocation` may either be just a bucket name, e.g. `my-restic-bucket`, or a bucket name plus a prefix under
which you'd like the restic data to be stored, e.g. `my-restic-bucket/ark-repos`.
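As a sketch, you could uncomment and set the field in the AWS example config like this (the bucket/prefix value is an assumption):

```bash
sed -i 's|# resticLocation: <YOUR_RESTIC_LOCATION>|resticLocation: my-restic-bucket/ark-repos|' \
  examples/aws/00-ark-config.yaml
kubectl apply -f examples/aws/00-ark-config.yaml
```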
You're now ready to use Ark with restic.
## Back up
1. Run the following for each pod that contains a volume to back up:
```bash
kubectl -n YOUR_POD_NAMESPACE annotate pod/YOUR_POD_NAME backup.ark.heptio.com/backup-volumes=YOUR_VOLUME_NAME_1,YOUR_VOLUME_NAME_2,...
```
where the volume names are the names of the volumes in the pod spec.
For example, for the following pod:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: sample
  namespace: foo
spec:
  containers:
    - image: k8s.gcr.io/test-webserver
      name: test-webserver
      volumeMounts:
        - name: pvc-volume
          mountPath: /volume-1
        - name: emptydir-volume
          mountPath: /volume-2
  volumes:
    - name: pvc-volume
      persistentVolumeClaim:
        claimName: test-volume-claim
    - name: emptydir-volume
      emptyDir: {}
```
You'd run:
```bash
kubectl -n foo annotate pod/sample backup.ark.heptio.com/backup-volumes=pvc-volume,emptydir-volume
```
This annotation can also be provided in a pod template spec if you use a controller to manage your pods.
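For example, a hypothetical patch that adds the annotation to a Deployment's pod template so that newly created pods carry it:

```bash
kubectl -n foo patch deployment sample-deploy --type merge -p \
  '{"spec":{"template":{"metadata":{"annotations":{"backup.ark.heptio.com/backup-volumes":"pvc-volume"}}}}}'
```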
1. Take an Ark backup:
```bash
ark backup create NAME OPTIONS...
```
1. When the backup completes, view information about the backups:
```bash
ark backup describe YOUR_BACKUP_NAME
kubectl -n heptio-ark get podvolumebackups -l ark.heptio.com/backup-name=YOUR_BACKUP_NAME -o yaml
```
## Restore
1. Restore from your Ark backup:
```bash
ark restore create --from-backup BACKUP_NAME OPTIONS...
```
1. When the restore completes, view information about your pod volume restores:
```bash
ark restore describe YOUR_RESTORE_NAME
kubectl -n heptio-ark get podvolumerestores -l ark.heptio.com/restore-name=YOUR_RESTORE_NAME -o yaml
```
## Limitations
- You cannot use the main Ark bucket for storing restic backups. We plan to address this issue
in a future release.
- `hostPath` volumes are not supported. [Local persistent volumes][4] are supported.
- Those of you familiar with [restic][1] may know that it encrypts all of its data. We've decided to use a static,
common encryption key for all restic repositories created by Ark. **This means that anyone who has access to your
bucket can decrypt your restic backup data**. Make sure that you limit access to the restic bucket
appropriately. We plan to implement full Ark backup encryption, including securing the restic encryption keys, in
a future release.
## Troubleshooting
Run the following checks:
Are your Ark server and daemonset pods running?
```bash
kubectl get pods -n heptio-ark
```
Does your restic repository exist, and is it ready?
```bash
ark restic repo get
ark restic repo get REPO_NAME -o yaml
```
Are there any errors in your Ark backup/restore?
```bash
ark backup describe BACKUP_NAME
ark backup logs BACKUP_NAME
ark restore describe RESTORE_NAME
ark restore logs RESTORE_NAME
```
What is the status of your pod volume backups/restores?
```bash
kubectl -n heptio-ark get podvolumebackups -l ark.heptio.com/backup-name=BACKUP_NAME -o yaml
kubectl -n heptio-ark get podvolumerestores -l ark.heptio.com/restore-name=RESTORE_NAME -o yaml
```
Is there any useful information in the Ark server or daemon pod logs?
```bash
kubectl -n heptio-ark logs deploy/ark
kubectl -n heptio-ark logs DAEMON_POD_NAME
```
**NOTE**: You can increase the verbosity of the pod logs by adding `--log-level=debug` as an argument
to the container command in the deployment/daemonset pod template spec.
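One way to do this interactively (the daemonset name comes from the example manifests):

```bash
kubectl -n heptio-ark edit deployment/ark      # add --log-level=debug under args
kubectl -n heptio-ark edit daemonset/restic    # same for the restic server
```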
## How backup and restore work with restic
We introduced three custom resource definitions and associated controllers:
- `ResticRepository` - represents/manages the lifecycle of Ark's [restic repositories][5]. Ark creates
a restic repository per namespace when the first restic backup for a namespace is requested. The controller
for this custom resource executes restic repository lifecycle commands -- `restic init`, `restic check`,
and `restic prune`.
You can see information about your Ark restic repositories by running `ark restic repo get`.
- `PodVolumeBackup` - represents a restic backup of a volume in a pod. The main Ark backup process creates
one or more of these when it finds an annotated pod. Each node in the cluster runs a controller for this
resource (in a daemonset) that handles the `PodVolumeBackups` for pods on that node. The controller executes
`restic backup` commands to back up pod volume data.
- `PodVolumeRestore` - represents a restic restore of a pod volume. The main Ark restore process creates one
or more of these when it encounters a pod that has associated restic backups. Each node in the cluster runs a
controller for this resource (in the same daemonset as above) that handles the `PodVolumeRestores` for pods
on that node. The controller executes `restic restore` commands to restore pod volume data.
### Backup
1. The main Ark backup process checks each pod that it's backing up for the annotation specifying that a restic backup
should be taken (`backup.ark.heptio.com/backup-volumes`)
1. When found, Ark first ensures a restic repository exists for the pod's namespace, by:
- checking if a `ResticRepository` custom resource already exists
- if not, creating a new one, and waiting for the `ResticRepository` controller to init/check it
1. Ark then creates a `PodVolumeBackup` custom resource per volume listed in the pod annotation
1. The main Ark process now waits for the `PodVolumeBackup` resources to complete or fail
1. Meanwhile, each `PodVolumeBackup` is handled by the controller on the appropriate node, which:
- has a hostPath volume mount of `/var/lib/kubelet/pods` to access the pod volume data
- finds the pod volume's subdirectory within the above volume
- runs `restic backup`
- updates the status of the custom resource to `Completed` or `Failed`
1. As each `PodVolumeBackup` finishes, the main Ark process captures its restic snapshot ID and adds it as an annotation
to the copy of the pod JSON that's stored in the Ark backup. This will be used for restores, as seen in the next section.
### Restore
1. The main Ark restore process checks each pod that it's restoring for annotations specifying a restic backup
exists for a volume in the pod (`snapshot.ark.heptio.com/<volume-name>`)
1. When found, Ark first ensures a restic repository exists for the pod's namespace, by:
- checking if a `ResticRepository` custom resource already exists
- if not, creating a new one, and waiting for the `ResticRepository` controller to init/check it (note that
in this case, the actual repository should already exist in object storage, so the Ark controller will simply
check it for integrity)
1. Ark adds an init container to the pod, whose job is to wait for all restic restores for the pod to complete (more
on this shortly)
1. Ark creates the pod, with the added init container, by submitting it to the Kubernetes API
1. Ark creates a `PodVolumeRestore` custom resource for each volume to be restored in the pod
1. The main Ark process now waits for each `PodVolumeRestore` resource to complete or fail
1. Meanwhile, each `PodVolumeRestore` is handled by the controller on the appropriate node, which:
- has a hostPath volume mount of `/var/lib/kubelet/pods` to access the pod volume data
- waits for the pod to be running the init container
- finds the pod volume's subdirectory within the above volume
- runs `restic restore`
- on success, writes a file into the pod volume, in an `.ark` subdirectory, whose name is the UID of the Ark restore
that this pod volume restore is for
- updates the status of the custom resource to `Completed` or `Failed`
1. The init container that was added to the pod is running a process that waits until it finds a file
within each restored volume, under `.ark`, whose name is the UID of the Ark restore being run
1. Once all such files are found, the init container's process terminates successfully and the pod moves
on to running other init containers/the main containers.
[1]: https://github.com/restic/restic
[2]: cloud-common.md
[3]: https://github.com/heptio/ark/releases/
[4]: https://kubernetes.io/docs/concepts/storage/volumes/#local
[5]: http://restic.readthedocs.io/en/latest/100_references.html#terminology


@@ -2,11 +2,14 @@
These tips can help you troubleshoot known issues. If they don't help, you can [file an issue][4], or talk to us on the [Kubernetes Slack team][25] channel `#ark-dr`.
* [Debug installation/setup issues][2]
* [Delete namespaces and backups][0]
* [Debug restores][1]
[0]: debugging-deletes.md
[1]: debugging-restores.md
[2]: debugging-install.md
[4]: https://github.com/heptio/ark/issues
[25]: http://slack.kubernetes.io/


@@ -25,6 +25,12 @@ persistentVolumeProvider:
backupStorageProvider:
  name: aws
  bucket: <YOUR_BUCKET>
  # Uncomment the below line to enable restic integration.
  # The format for resticLocation is <bucket>[/<prefix>],
  # e.g. "my-restic-bucket" or "my-restic-bucket/repos".
  # This MUST be a different bucket than the main Ark bucket
  # specified just above.
  # resticLocation: <YOUR_RESTIC_LOCATION>
  config:
    region: <YOUR_REGION>
backupSyncPeriod: 30m


@@ -26,12 +26,18 @@ spec:
        component: ark
      annotations:
        iam.amazonaws.com/role: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<HEPTIO_ARK_ROLE_NAME>
        prometheus.io/scrape: "true"
        prometheus.io/port: "8085"
        prometheus.io/path: "/metrics"
    spec:
      restartPolicy: Always
      serviceAccountName: ark
      containers:
        - name: ark
          image: gcr.io/heptio-images/ark:latest
          ports:
            - name: metrics
              containerPort: 8085
          command:
            - /ark
          args:


@@ -24,6 +24,10 @@ spec:
    metadata:
      labels:
        component: ark
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "8085"
        prometheus.io/path: "/metrics"
    spec:
      restartPolicy: Always
      serviceAccountName: ark
@@ -39,12 +43,18 @@ spec:
            mountPath: /credentials
          - name: plugins
            mountPath: /plugins
          - name: scratch
            mountPath: /scratch
          env:
            - name: AWS_SHARED_CREDENTIALS_FILE
              value: /credentials/cloud
            - name: ARK_SCRATCH_DIR
              value: /scratch
      volumes:
        - name: cloud-credentials
          secret:
            secretName: cloud-credentials
        - name: plugins
          emptyDir: {}
        - name: scratch
          emptyDir: {}


@@ -0,0 +1,69 @@
# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: restic
  namespace: heptio-ark
spec:
  selector:
    matchLabels:
      name: restic
  template:
    metadata:
      labels:
        name: restic
    spec:
      serviceAccountName: ark
      securityContext:
        runAsUser: 0
      volumes:
        - name: cloud-credentials
          secret:
            secretName: cloud-credentials
        - name: host-pods
          hostPath:
            path: /var/lib/kubelet/pods
        - name: scratch
          emptyDir: {}
      containers:
        - name: ark
          image: gcr.io/heptio-images/ark:latest
          command:
            - /ark
          args:
            - restic
            - server
          volumeMounts:
            - name: cloud-credentials
              mountPath: /credentials
            - name: host-pods
              mountPath: /host_pods
              mountPropagation: HostToContainer
            - name: scratch
              mountPath: /scratch
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: HEPTIO_ARK_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: AWS_SHARED_CREDENTIALS_FILE
              value: /credentials/cloud
            - name: ARK_SCRATCH_DIR
              value: /scratch


@@ -24,12 +24,19 @@ spec:
    metadata:
      labels:
        component: ark
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "8085"
        prometheus.io/path: "/metrics"
    spec:
      restartPolicy: Always
      serviceAccountName: ark
      containers:
        - name: ark
          image: gcr.io/heptio-images/ark:latest
          ports:
            - name: metrics
              containerPort: 8085
          command:
            - /ark
          args:
@@ -37,11 +44,18 @@ spec:
          envFrom:
            - secretRef:
                name: cloud-credentials
          env:
            - name: ARK_SCRATCH_DIR
              value: /scratch
          volumeMounts:
            - name: plugins
              mountPath: /plugins
            - name: scratch
              mountPath: /scratch
      volumes:
        - name: plugins
          emptyDir: {}
        - name: scratch
          emptyDir: {}
      nodeSelector:
        beta.kubernetes.io/os: linux


@@ -25,6 +25,12 @@ persistentVolumeProvider:
backupStorageProvider:
  name: azure
  bucket: <YOUR_BUCKET>
  # Uncomment the below line to enable restic integration.
  # The format for resticLocation is <bucket>[/<prefix>],
  # e.g. "my-restic-bucket" or "my-restic-bucket/repos".
  # This MUST be a different bucket than the main Ark bucket
  # specified just above.
  # resticLocation: <YOUR_RESTIC_LOCATION>
backupSyncPeriod: 30m
gcSyncPeriod: 30m
scheduleSyncPeriod: 1m


@@ -0,0 +1,75 @@
# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: restic
  namespace: heptio-ark
spec:
  selector:
    matchLabels:
      name: restic
  template:
    metadata:
      labels:
        name: restic
    spec:
      serviceAccountName: ark
      securityContext:
        runAsUser: 0
      volumes:
        - name: host-pods
          hostPath:
            path: /var/lib/kubelet/pods
        - name: scratch
          emptyDir: {}
      containers:
        - name: ark
          image: gcr.io/heptio-images/ark:latest
          command:
            - /ark
          args:
            - restic
            - server
          volumeMounts:
            - name: host-pods
              mountPath: /host_pods
              mountPropagation: HostToContainer
            - name: scratch
              mountPath: /scratch
          envFrom:
            - secretRef:
                name: cloud-credentials
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: HEPTIO_ARK_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: AZURE_ACCOUNT_NAME
              valueFrom:
                secretKeyRef:
                  name: cloud-credentials
                  key: AZURE_STORAGE_ACCOUNT_ID
            - name: AZURE_ACCOUNT_KEY
              valueFrom:
                secretKeyRef:
                  name: cloud-credentials
                  key: AZURE_STORAGE_KEY
            - name: ARK_SCRATCH_DIR
              value: /scratch


@@ -102,6 +102,51 @@ spec:
    plural: deletebackuprequests
    kind: DeleteBackupRequest
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: podvolumebackups.ark.heptio.com
  labels:
    component: ark
spec:
  group: ark.heptio.com
  version: v1
  scope: Namespaced
  names:
    plural: podvolumebackups
    kind: PodVolumeBackup
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: podvolumerestores.ark.heptio.com
  labels:
    component: ark
spec:
  group: ark.heptio.com
  version: v1
  scope: Namespaced
  names:
    plural: podvolumerestores
    kind: PodVolumeRestore
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: resticrepositories.ark.heptio.com
  labels:
    component: ark
spec:
  group: ark.heptio.com
  version: v1
  scope: Namespaced
  names:
    plural: resticrepositories
    kind: ResticRepository
---
apiVersion: v1
kind: Namespace


@@ -23,6 +23,12 @@ persistentVolumeProvider:
backupStorageProvider:
  name: gcp
  bucket: <YOUR_BUCKET>
  # Uncomment the below line to enable restic integration.
  # The format for resticLocation is <bucket>[/<prefix>],
  # e.g. "my-restic-bucket" or "my-restic-bucket/repos".
  # This MUST be a different bucket than the main Ark bucket
  # specified just above.
  # resticLocation: <YOUR_RESTIC_LOCATION>
backupSyncPeriod: 30m
gcSyncPeriod: 30m
scheduleSyncPeriod: 1m


@@ -24,12 +24,19 @@ spec:
    metadata:
      labels:
        component: ark
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "8085"
        prometheus.io/path: "/metrics"
    spec:
      restartPolicy: Always
      serviceAccountName: ark
      containers:
        - name: ark
          image: gcr.io/heptio-images/ark:latest
          ports:
            - name: metrics
              containerPort: 8085
          command:
            - /ark
          args:
@@ -39,12 +46,18 @@ spec:
            mountPath: /credentials
          - name: plugins
            mountPath: /plugins
          - name: scratch
            mountPath: /scratch
          env:
            - name: GOOGLE_APPLICATION_CREDENTIALS
              value: /credentials/cloud
            - name: ARK_SCRATCH_DIR
              value: /scratch
      volumes:
        - name: cloud-credentials
          secret:
            secretName: cloud-credentials
        - name: plugins
          emptyDir: {}
        - name: scratch
          emptyDir: {}


@@ -0,0 +1,69 @@
# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: restic
  namespace: heptio-ark
spec:
  selector:
    matchLabels:
      name: restic
  template:
    metadata:
      labels:
        name: restic
    spec:
      serviceAccountName: ark
      securityContext:
        runAsUser: 0
      volumes:
        - name: cloud-credentials
          secret:
            secretName: cloud-credentials
        - name: host-pods
          hostPath:
            path: /var/lib/kubelet/pods
        - name: scratch
          emptyDir: {}
      containers:
        - name: ark
          image: gcr.io/heptio-images/ark:latest
          command:
            - /ark
          args:
            - restic
            - server
          volumeMounts:
            - name: cloud-credentials
              mountPath: /credentials
            - name: host-pods
              mountPath: /host_pods
              mountPropagation: HostToContainer
            - name: scratch
              mountPath: /scratch
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: HEPTIO_ARK_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: GOOGLE_APPLICATION_CREDENTIALS
              value: /credentials/cloud
            - name: ARK_SCRATCH_DIR
              value: /scratch

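The restic daemon only backs up volumes that a pod explicitly opts in to. Assuming the v0.9 opt-in annotation key backup.ark.heptio.com/backup-volumes (check the restic docs for the release you run), an opted-in pod might look like this sketch, where the pod name, image, and volume are all invented:

apiVersion: v1
kind: Pod
metadata:
  name: sample-app
  annotations:
    # comma-separated names of the pod's volumes to back up with restic
    backup.ark.heptio.com/backup-volumes: data
spec:
  containers:
  - name: app
    image: nginx
    volumeMounts:
    - name: data
      mountPath: /var/lib/data
  volumes:
  - name: data
    emptyDir: {}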

@@ -21,6 +21,12 @@ metadata:
backupStorageProvider:
name: aws
bucket: <YOUR_BUCKET>
# Uncomment the below line to enable restic integration.
# The format for resticLocation is <bucket>[/<prefix>],
# e.g. "my-restic-bucket" or "my-restic-bucket/repos".
# This MUST be a different bucket than the main Ark bucket
# specified just above.
# resticLocation: <YOUR_RESTIC_LOCATION>
config:
region: <YOUR_REGION>
s3ForcePathStyle: "true"


@@ -24,12 +24,19 @@ spec:
metadata:
labels:
component: ark
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8085"
prometheus.io/path: "/metrics"
spec:
restartPolicy: Always
serviceAccountName: ark
containers:
- name: ark
image: gcr.io/heptio-images/ark:latest
ports:
- name: metrics
containerPort: 8085
command:
- /ark
args:
@@ -39,12 +46,18 @@ spec:
mountPath: /credentials
- name: plugins
mountPath: /plugins
- name: scratch
mountPath: /scratch
env:
- name: AWS_SHARED_CREDENTIALS_FILE
value: /credentials/cloud
- name: ARK_SCRATCH_DIR
value: /scratch
volumes:
- name: cloud-credentials
secret:
secretName: cloud-credentials
- name: plugins
emptyDir: {}
- name: scratch
emptyDir: {}


@@ -31,6 +31,8 @@ spec:
volumes:
- name: storage
emptyDir: {}
- name: config
emptyDir: {}
containers:
- name: minio
image: minio/minio:latest
@@ -38,6 +40,7 @@ spec:
args:
- server
- /storage
- --config-dir=/config
env:
- name: MINIO_ACCESS_KEY
value: "minio"
@@ -48,6 +51,8 @@ spec:
volumeMounts:
- name: storage
mountPath: "/storage"
- name: config
mountPath: "/config"
---
apiVersion: v1
@@ -94,6 +99,9 @@ spec:
name: minio-setup
spec:
restartPolicy: OnFailure
volumes:
- name: config
emptyDir: {}
containers:
- name: mc
image: minio/mc:latest
@@ -101,4 +109,7 @@ spec:
command:
- /bin/sh
- -c
- "mc config host add ark http://minio:9000 minio minio123 && mc mb -p ark/ark"
- "mc --config-folder=/config config host add ark http://minio:9000 minio minio123 && mc --config-folder=/config mb -p ark/ark"
volumeMounts:
- name: config
mountPath: "/config"


@@ -21,6 +21,12 @@ metadata:
backupStorageProvider:
name: aws
bucket: ark
# Uncomment the below line to enable restic integration.
# The format for resticLocation is <bucket>[/<prefix>],
# e.g. "my-restic-bucket" or "my-restic-bucket/repos".
# This MUST be a different bucket than the main Ark bucket
# specified just above.
# resticLocation: <YOUR_RESTIC_LOCATION>
config:
region: minio
s3ForcePathStyle: "true"


@@ -24,12 +24,19 @@ spec:
metadata:
labels:
component: ark
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8085"
prometheus.io/path: "/metrics"
spec:
restartPolicy: Always
serviceAccountName: ark
containers:
- name: ark
image: gcr.io/heptio-images/ark:latest
ports:
- name: metrics
containerPort: 8085
command:
- /ark
args:
@@ -39,12 +46,18 @@ spec:
mountPath: /credentials
- name: plugins
mountPath: /plugins
- name: scratch
mountPath: /scratch
env:
- name: AWS_SHARED_CREDENTIALS_FILE
value: /credentials/cloud
- name: ARK_SCRATCH_DIR
value: /scratch
volumes:
- name: cloud-credentials
secret:
secretName: cloud-credentials
- name: plugins
emptyDir: {}
- name: scratch
emptyDir: {}


@@ -0,0 +1,69 @@
# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: restic
namespace: heptio-ark
spec:
selector:
matchLabels:
name: restic
template:
metadata:
labels:
name: restic
spec:
serviceAccountName: ark
securityContext:
runAsUser: 0
volumes:
- name: cloud-credentials
secret:
secretName: cloud-credentials
- name: host-pods
hostPath:
path: /var/lib/kubelet/pods
- name: scratch
emptyDir: {}
containers:
- name: ark
image: gcr.io/heptio-images/ark:latest
command:
- /ark
args:
- restic
- server
volumeMounts:
- name: cloud-credentials
mountPath: /credentials
- name: host-pods
mountPath: /host_pods
mountPropagation: HostToContainer
- name: scratch
mountPath: /scratch
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: HEPTIO_ARK_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: AWS_SHARED_CREDENTIALS_FILE
value: /credentials/cloud
- name: ARK_SCRATCH_DIR
value: /scratch


@@ -12,10 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-FROM gcr.io/heptio-images/golang:1.9-alpine3.6
+FROM golang:1.10-alpine3.7
-RUN mkdir -p /go/src/k8s.io && \
+RUN apk add --update --no-cache git bash && \
+    mkdir -p /go/src/k8s.io && \
     cd /go/src/k8s.io && \
-    git clone -b kubernetes-1.9.0 https://github.com/kubernetes/code-generator && \
-    git clone -b kubernetes-1.9.0 https://github.com/kubernetes/apimachinery && \
+    git clone -b kubernetes-1.10.0 https://github.com/kubernetes/code-generator && \
+    git clone -b kubernetes-1.10.0 https://github.com/kubernetes/apimachinery && \
     chmod -R a+w /go


@@ -61,7 +61,7 @@ if [[ "${GOOS}" = "windows" ]]; then
OUTPUT="${OUTPUT}.exe"
fi
-go build -i \
+go build \
-o ${OUTPUT} \
-installsuffix "static" \
-ldflags "${LDFLAGS}" \


@@ -23,6 +23,5 @@ export CGO_ENABLED=0
TARGETS=$(for d in "$@"; do echo ./$d/...; done)
echo "Running tests:"
-go test -i -installsuffix "static" ${TARGETS}
+go test -installsuffix "static" -timeout 60s ${TARGETS}
echo "Success!"


@@ -44,7 +44,7 @@ type BackupSpec struct {
// SnapshotVolumes specifies whether to take cloud snapshots
// of any PV's referenced in the set of objects included
// in the Backup.
-	SnapshotVolumes *bool `json:"snapshotVolumes"`
+	SnapshotVolumes *bool `json:"snapshotVolumes,omitempty"`
// TTL is a time.Duration-parseable string describing how long
// the Backup should be retained for.
@@ -80,7 +80,7 @@ type BackupResourceHookSpec struct {
// ExcludedResources specifies the resources to which this hook spec does not apply.
ExcludedResources []string `json:"excludedResources"`
// LabelSelector, if specified, filters the resources to which this hook spec applies.
-	LabelSelector *metav1.LabelSelector `json:"labelSelector"`
+	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
// Hooks is a list of BackupResourceHooks to execute. DEPRECATED. Replaced by PreHooks.
Hooks []BackupResourceHook `json:"hooks"`
// PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup.
@@ -170,6 +170,17 @@ type BackupStatus struct {
// ValidationErrors is a slice of all validation errors (if
// applicable).
ValidationErrors []string `json:"validationErrors"`
// StartTimestamp records the time a backup was started.
// Separate from CreationTimestamp, since that value changes
// on restores.
// The server's time is used for StartTimestamps
StartTimestamp metav1.Time `json:"startTimestamp"`
// CompletionTimestamp records the time a backup was completed.
// Completion time is recorded even on failed backups.
// The server's time is used for CompletionTimestamps
CompletionTimestamp metav1.Time `json:"completionTimestamp"`
}
// VolumeBackupInfo captures the required information about

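Serialized, the two new fields show up in a backup's status block; a sketch with invented timestamps (metav1.Time marshals as RFC 3339):

status:
  phase: Completed
  startTimestamp: "2018-07-06T08:25:13Z"
  completionTimestamp: "2018-07-06T08:27:02Z"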

@@ -58,6 +58,10 @@ type Config struct {
// new backups that should be triggered based on schedules.
ScheduleSyncPeriod metav1.Duration `json:"scheduleSyncPeriod"`
// PodVolumeOperationTimeout is how long backups/restores of pod volumes (i.e.
// using restic) should be allowed to run before timing out.
PodVolumeOperationTimeout metav1.Duration `json:"podVolumeOperationTimeout"`
// ResourcePriorities is an ordered slice of resources specifying the desired
// order of resource restores. Any resources not in the list will be restored
// alphabetically after the prioritized resources.
@@ -86,4 +90,10 @@ type ObjectStorageProviderConfig struct {
// Bucket is the name of the bucket in object storage where Ark backups
// are stored.
Bucket string `json:"bucket"`
// ResticLocation is the bucket and optional prefix in object storage where
// Ark stores restic backups of pod volumes, specified either as "bucket" or
// "bucket/prefix". This bucket must be different than the `Bucket` field.
// Optional.
ResticLocation string `json:"resticLocation"`
}

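PodVolumeOperationTimeout surfaces on the Config object through its JSON tag, and metav1.Duration round-trips as a Go-style duration string. A sketch (the 60m value is an example, not a default):

apiVersion: ark.heptio.com/v1
kind: Config
metadata:
  namespace: heptio-ark
  name: default
podVolumeOperationTimeout: 60m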

@@ -33,11 +33,6 @@ const (
DeleteBackupRequestPhaseInProgress DeleteBackupRequestPhase = "InProgress"
// DeleteBackupRequestPhaseProcessed means the DeleteBackupRequest has been processed.
DeleteBackupRequestPhaseProcessed DeleteBackupRequestPhase = "Processed"
// BackupNameLabel is the label key used by a DeleteBackupRequest to identify its backup by name.
BackupNameLabel = "ark.heptio.com/backup-name"
// BackupUIDLabel is the label key used by a DeleteBackupRequest to identify its backup by uid.
BackupUIDLabel = "ark.heptio.com/backup-uid"
)
// DeleteBackupRequestStatus is the current status of a DeleteBackupRequest.


@@ -0,0 +1,39 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
const (
// BackupNameLabel is the label key used to identify a backup by name.
BackupNameLabel = "ark.heptio.com/backup-name"
// BackupUIDLabel is the label key used to identify a backup by uid.
BackupUIDLabel = "ark.heptio.com/backup-uid"
// RestoreNameLabel is the label key used to identify a restore by name.
RestoreNameLabel = "ark.heptio.com/restore-name"
// RestoreUIDLabel is the label key used to identify a restore by uid.
RestoreUIDLabel = "ark.heptio.com/restore-uid"
// PodUIDLabel is the label key used to identify a pod by uid.
PodUIDLabel = "ark.heptio.com/pod-uid"
// PodVolumeOperationTimeoutAnnotation is the annotation key used to apply
// a backup/restore-specific timeout value for pod volume operations (i.e.
// restic backups/restores).
PodVolumeOperationTimeoutAnnotation = "ark.heptio.com/pod-volume-timeout"
)

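As the backup code later in this diff shows, PodVolumeOperationTimeoutAnnotation is read from the Backup object and parsed with time.ParseDuration, so a per-backup override is just an annotation. A sketch (the backup name and value are invented):

apiVersion: ark.heptio.com/v1
kind: Backup
metadata:
  namespace: heptio-ark
  name: nightly-backup
  annotations:
    # overrides the server-wide podVolumeOperationTimeout for this backup only
    ark.heptio.com/pod-volume-timeout: 45m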

@@ -0,0 +1,87 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PodVolumeBackupSpec is the specification for a PodVolumeBackup.
type PodVolumeBackupSpec struct {
// Node is the name of the node that the Pod is running on.
Node string `json:"node"`
// Pod is a reference to the pod containing the volume to be backed up.
Pod corev1api.ObjectReference `json:"pod"`
// Volume is the name of the volume within the Pod to be backed
// up.
Volume string `json:"volume"`
// RepoIdentifier is the restic repository identifier.
RepoIdentifier string `json:"repoIdentifier"`
// Tags are a map of key-value pairs that should be applied to the
// volume backup as tags.
Tags map[string]string `json:"tags"`
}
// PodVolumeBackupPhase represents the lifecycle phase of a PodVolumeBackup.
type PodVolumeBackupPhase string
const (
PodVolumeBackupPhaseNew PodVolumeBackupPhase = "New"
PodVolumeBackupPhaseInProgress PodVolumeBackupPhase = "InProgress"
PodVolumeBackupPhaseCompleted PodVolumeBackupPhase = "Completed"
PodVolumeBackupPhaseFailed PodVolumeBackupPhase = "Failed"
)
// PodVolumeBackupStatus is the current status of a PodVolumeBackup.
type PodVolumeBackupStatus struct {
// Phase is the current state of the PodVolumeBackup.
Phase PodVolumeBackupPhase `json:"phase"`
// Path is the full path within the controller pod being backed up.
Path string `json:"path"`
// SnapshotID is the identifier for the snapshot of the pod volume.
SnapshotID string `json:"snapshotID"`
// Message is a message about the pod volume backup's status.
Message string `json:"message"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type PodVolumeBackup struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec PodVolumeBackupSpec `json:"spec"`
Status PodVolumeBackupStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodVolumeBackupList is a list of PodVolumeBackups.
type PodVolumeBackupList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []PodVolumeBackup `json:"items"`
}

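Given the JSON tags above, a serialized PodVolumeBackup looks roughly like this sketch (every concrete value here is invented):

apiVersion: ark.heptio.com/v1
kind: PodVolumeBackup
metadata:
  namespace: heptio-ark
  name: my-backup-abc12
spec:
  node: node-1
  pod:
    kind: Pod
    namespace: default
    name: sample-app
  volume: data
  repoIdentifier: s3:s3.amazonaws.com/my-restic-bucket/default
  tags:
    backup: my-backup
status:
  phase: Completed
  path: /host_pods/<pod-uid>/volumes/kubernetes.io~empty-dir/data
  snapshotID: 5ee5ba58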

@@ -0,0 +1,76 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PodVolumeRestoreSpec is the specification for a PodVolumeRestore.
type PodVolumeRestoreSpec struct {
// Pod is a reference to the pod containing the volume to be restored.
Pod corev1api.ObjectReference `json:"pod"`
// Volume is the name of the volume within the Pod to be restored.
Volume string `json:"volume"`
// RepoIdentifier is the restic repository identifier.
RepoIdentifier string `json:"repoIdentifier"`
// SnapshotID is the ID of the volume snapshot to be restored.
SnapshotID string `json:"snapshotID"`
}
// PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore.
type PodVolumeRestorePhase string
const (
PodVolumeRestorePhaseNew PodVolumeRestorePhase = "New"
PodVolumeRestorePhaseInProgress PodVolumeRestorePhase = "InProgress"
PodVolumeRestorePhaseCompleted PodVolumeRestorePhase = "Completed"
PodVolumeRestorePhaseFailed PodVolumeRestorePhase = "Failed"
)
// PodVolumeRestoreStatus is the current status of a PodVolumeRestore.
type PodVolumeRestoreStatus struct {
// Phase is the current state of the PodVolumeRestore.
Phase PodVolumeRestorePhase `json:"phase"`
// Message is a message about the pod volume restore's status.
Message string `json:"message"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type PodVolumeRestore struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec PodVolumeRestoreSpec `json:"spec"`
Status PodVolumeRestoreStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodVolumeRestoreList is a list of PodVolumeRestores.
type PodVolumeRestoreList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []PodVolumeRestore `json:"items"`
}

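The restore side mirrors the backup object, with the snapshot to restore carried in the spec; another invented sketch:

apiVersion: ark.heptio.com/v1
kind: PodVolumeRestore
metadata:
  namespace: heptio-ark
  name: my-restore-xyz34
spec:
  pod:
    kind: Pod
    namespace: default
    name: sample-app
  volume: data
  repoIdentifier: s3:s3.amazonaws.com/my-restic-bucket/default
  # snapshotID comes from the status of the corresponding PodVolumeBackup
  snapshotID: 5ee5ba58
status:
  phase: InProgress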

@@ -41,21 +41,41 @@ func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
type typeInfo struct {
PluralName string
ItemType runtime.Object
ItemListType runtime.Object
}
func newTypeInfo(pluralName string, itemType, itemListType runtime.Object) typeInfo {
return typeInfo{
PluralName: pluralName,
ItemType: itemType,
ItemListType: itemListType,
}
}
// CustomResources returns a map of all custom resources within the Ark
// API group, keyed on Kind.
func CustomResources() map[string]typeInfo {
return map[string]typeInfo{
"Backup": newTypeInfo("backups", &Backup{}, &BackupList{}),
"Restore": newTypeInfo("restores", &Restore{}, &RestoreList{}),
"Schedule": newTypeInfo("schedules", &Schedule{}, &ScheduleList{}),
"Config": newTypeInfo("configs", &Config{}, &ConfigList{}),
"DownloadRequest": newTypeInfo("downloadrequests", &DownloadRequest{}, &DownloadRequestList{}),
"DeleteBackupRequest": newTypeInfo("deletebackuprequests", &DeleteBackupRequest{}, &DeleteBackupRequestList{}),
"PodVolumeBackup": newTypeInfo("podvolumebackups", &PodVolumeBackup{}, &PodVolumeBackupList{}),
"PodVolumeRestore": newTypeInfo("podvolumerestores", &PodVolumeRestore{}, &PodVolumeRestoreList{}),
"ResticRepository": newTypeInfo("resticrepositories", &ResticRepository{}, &ResticRepositoryList{}),
}
}
func addKnownTypes(scheme *runtime.Scheme) error {
-	scheme.AddKnownTypes(SchemeGroupVersion,
-		&Backup{},
-		&BackupList{},
-		&Schedule{},
-		&ScheduleList{},
-		&Restore{},
-		&RestoreList{},
-		&Config{},
-		&ConfigList{},
-		&DownloadRequest{},
-		&DownloadRequestList{},
-		&DeleteBackupRequest{},
-		&DeleteBackupRequestList{},
-	)
+	for _, typeInfo := range CustomResources() {
+		scheme.AddKnownTypes(SchemeGroupVersion, typeInfo.ItemType, typeInfo.ItemListType)
+	}
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}


@@ -0,0 +1,72 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ResticRepositorySpec is the specification for a ResticRepository.
type ResticRepositorySpec struct {
// MaintenanceFrequency is how often maintenance should be run.
MaintenanceFrequency metav1.Duration `json:"maintenanceFrequency"`
// ResticIdentifier is the full restic-compatible string for identifying
// this repository.
ResticIdentifier string `json:"resticIdentifier"`
}
// ResticRepositoryPhase represents the lifecycle phase of a ResticRepository.
type ResticRepositoryPhase string
const (
ResticRepositoryPhaseNew ResticRepositoryPhase = "New"
ResticRepositoryPhaseReady ResticRepositoryPhase = "Ready"
ResticRepositoryPhaseNotReady ResticRepositoryPhase = "NotReady"
)
// ResticRepositoryStatus is the current status of a ResticRepository.
type ResticRepositoryStatus struct {
// Phase is the current state of the ResticRepository.
Phase ResticRepositoryPhase `json:"phase"`
// Message is a message about the current status of the ResticRepository.
Message string `json:"message"`
// LastMaintenanceTime is the last time maintenance was run.
LastMaintenanceTime metav1.Time `json:"lastMaintenanceTime"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ResticRepository struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec ResticRepositorySpec `json:"spec"`
Status ResticRepositoryStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResticRepositoryList is a list of ResticRepositories.
type ResticRepositoryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []ResticRepository `json:"items"`
}

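And a ResticRepository sketch; maintenanceFrequency is a metav1.Duration, so it also serializes as a duration string (values invented):

apiVersion: ark.heptio.com/v1
kind: ResticRepository
metadata:
  namespace: heptio-ark
  name: default
spec:
  maintenanceFrequency: 24h
  resticIdentifier: s3:s3.amazonaws.com/my-restic-bucket/default
status:
  phase: Ready
  lastMaintenanceTime: "2018-07-06T08:25:13Z"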

@@ -49,16 +49,16 @@ type RestoreSpec struct {
// LabelSelector is a metav1.LabelSelector to filter with
// when restoring individual objects from the backup. If empty
// or nil, all objects are included. Optional.
-	LabelSelector *metav1.LabelSelector `json:"labelSelector"`
+	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
	// RestorePVs specifies whether to restore all included
	// PVs from snapshot (via the cloudprovider).
-	RestorePVs *bool `json:"restorePVs"`
+	RestorePVs *bool `json:"restorePVs,omitempty"`
	// IncludeClusterResources specifies whether cluster-scoped resources
	// should be included for consideration in the restore. If null, defaults
	// to true.
-	IncludeClusterResources *bool `json:"includeClusterResources"`
+	IncludeClusterResources *bool `json:"includeClusterResources,omitempty"`
}
// RestorePhase is a string representation of the lifecycle phase


@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// This file was autogenerated by deepcopy-gen. Do not edit it manually!
+// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
@@ -49,9 +49,8 @@ func (in *Backup) DeepCopy() *Backup {
func (in *Backup) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
-	} else {
-		return nil
-	}
+	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -106,9 +105,8 @@ func (in *BackupList) DeepCopy() *BackupList {
func (in *BackupList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
-	} else {
-		return nil
-	}
+	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -288,6 +286,8 @@ func (in *BackupStatus) DeepCopyInto(out *BackupStatus) {
*out = make([]string, len(*in))
copy(*out, *in)
}
in.StartTimestamp.DeepCopyInto(&out.StartTimestamp)
in.CompletionTimestamp.DeepCopyInto(&out.CompletionTimestamp)
return
}
@@ -342,6 +342,7 @@ func (in *Config) DeepCopyInto(out *Config) {
out.BackupSyncPeriod = in.BackupSyncPeriod
out.GCSyncPeriod = in.GCSyncPeriod
out.ScheduleSyncPeriod = in.ScheduleSyncPeriod
out.PodVolumeOperationTimeout = in.PodVolumeOperationTimeout
if in.ResourcePriorities != nil {
in, out := &in.ResourcePriorities, &out.ResourcePriorities
*out = make([]string, len(*in))
@@ -364,9 +365,8 @@ func (in *Config) DeepCopy() *Config {
func (in *Config) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
-	} else {
-		return nil
-	}
+	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -398,9 +398,8 @@ func (in *ConfigList) DeepCopy() *ConfigList {
func (in *ConfigList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
-	} else {
-		return nil
-	}
+	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -427,9 +426,8 @@ func (in *DeleteBackupRequest) DeepCopy() *DeleteBackupRequest {
func (in *DeleteBackupRequest) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
-	} else {
-		return nil
-	}
+	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -461,9 +459,8 @@ func (in *DeleteBackupRequestList) DeepCopy() *DeleteBackupRequestList {
func (in *DeleteBackupRequestList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
-	} else {
-		return nil
-	}
+	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -527,9 +524,8 @@ func (in *DownloadRequest) DeepCopy() *DownloadRequest {
func (in *DownloadRequest) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
-	} else {
-		return nil
-	}
+	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -561,9 +557,8 @@ func (in *DownloadRequestList) DeepCopy() *DownloadRequestList {
func (in *DownloadRequestList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
-	} else {
-		return nil
-	}
+	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -655,6 +650,296 @@ func (in *ObjectStorageProviderConfig) DeepCopy() *ObjectStorageProviderConfig {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeBackup) DeepCopyInto(out *PodVolumeBackup) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackup.
func (in *PodVolumeBackup) DeepCopy() *PodVolumeBackup {
if in == nil {
return nil
}
out := new(PodVolumeBackup)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodVolumeBackup) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeBackupList) DeepCopyInto(out *PodVolumeBackupList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodVolumeBackup, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupList.
func (in *PodVolumeBackupList) DeepCopy() *PodVolumeBackupList {
if in == nil {
return nil
}
out := new(PodVolumeBackupList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodVolumeBackupList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeBackupSpec) DeepCopyInto(out *PodVolumeBackupSpec) {
*out = *in
out.Pod = in.Pod
if in.Tags != nil {
in, out := &in.Tags, &out.Tags
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupSpec.
func (in *PodVolumeBackupSpec) DeepCopy() *PodVolumeBackupSpec {
if in == nil {
return nil
}
out := new(PodVolumeBackupSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeBackupStatus) DeepCopyInto(out *PodVolumeBackupStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupStatus.
func (in *PodVolumeBackupStatus) DeepCopy() *PodVolumeBackupStatus {
if in == nil {
return nil
}
out := new(PodVolumeBackupStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeRestore) DeepCopyInto(out *PodVolumeRestore) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestore.
func (in *PodVolumeRestore) DeepCopy() *PodVolumeRestore {
if in == nil {
return nil
}
out := new(PodVolumeRestore)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodVolumeRestore) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeRestoreList) DeepCopyInto(out *PodVolumeRestoreList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PodVolumeRestore, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreList.
func (in *PodVolumeRestoreList) DeepCopy() *PodVolumeRestoreList {
if in == nil {
return nil
}
out := new(PodVolumeRestoreList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PodVolumeRestoreList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeRestoreSpec) DeepCopyInto(out *PodVolumeRestoreSpec) {
*out = *in
out.Pod = in.Pod
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreSpec.
func (in *PodVolumeRestoreSpec) DeepCopy() *PodVolumeRestoreSpec {
if in == nil {
return nil
}
out := new(PodVolumeRestoreSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodVolumeRestoreStatus) DeepCopyInto(out *PodVolumeRestoreStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreStatus.
func (in *PodVolumeRestoreStatus) DeepCopy() *PodVolumeRestoreStatus {
if in == nil {
return nil
}
out := new(PodVolumeRestoreStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResticRepository) DeepCopyInto(out *ResticRepository) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepository.
func (in *ResticRepository) DeepCopy() *ResticRepository {
if in == nil {
return nil
}
out := new(ResticRepository)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResticRepository) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResticRepositoryList) DeepCopyInto(out *ResticRepositoryList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResticRepository, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepositoryList.
func (in *ResticRepositoryList) DeepCopy() *ResticRepositoryList {
if in == nil {
return nil
}
out := new(ResticRepositoryList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResticRepositoryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResticRepositorySpec) DeepCopyInto(out *ResticRepositorySpec) {
*out = *in
out.MaintenanceFrequency = in.MaintenanceFrequency
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepositorySpec.
func (in *ResticRepositorySpec) DeepCopy() *ResticRepositorySpec {
if in == nil {
return nil
}
out := new(ResticRepositorySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResticRepositoryStatus) DeepCopyInto(out *ResticRepositoryStatus) {
*out = *in
in.LastMaintenanceTime.DeepCopyInto(&out.LastMaintenanceTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepositoryStatus.
func (in *ResticRepositoryStatus) DeepCopy() *ResticRepositoryStatus {
if in == nil {
return nil
}
out := new(ResticRepositoryStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Restore) DeepCopyInto(out *Restore) {
*out = *in
@@ -679,9 +964,8 @@ func (in *Restore) DeepCopy() *Restore {
func (in *Restore) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
-	} else {
-		return nil
-	}
+	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -713,9 +997,8 @@ func (in *RestoreList) DeepCopy() *RestoreList {
func (in *RestoreList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
-	} else {
-		return nil
-	}
+	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -871,9 +1154,8 @@ func (in *Schedule) DeepCopy() *Schedule {
func (in *Schedule) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
-	} else {
-		return nil
-	}
+	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -905,9 +1187,8 @@ func (in *ScheduleList) DeepCopy() *ScheduleList {
func (in *ScheduleList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
-	} else {
-		return nil
-	}
+	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.


@@ -19,8 +19,10 @@ package backup
import (
"archive/tar"
"compress/gzip"
"context"
"fmt"
"io"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -34,6 +36,8 @@ import (
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/collections"
kubeutil "github.com/heptio/ark/pkg/util/kube"
"github.com/heptio/ark/pkg/util/logging"
@@ -48,11 +52,13 @@ type Backupper interface {
// kubernetesBackupper implements Backupper.
type kubernetesBackupper struct {
-	dynamicFactory        client.DynamicFactory
-	discoveryHelper       discovery.Helper
-	podCommandExecutor    podCommandExecutor
-	groupBackupperFactory groupBackupperFactory
-	snapshotService       cloudprovider.SnapshotService
+	dynamicFactory         client.DynamicFactory
+	discoveryHelper        discovery.Helper
+	podCommandExecutor     podexec.PodCommandExecutor
+	groupBackupperFactory  groupBackupperFactory
+	snapshotService        cloudprovider.SnapshotService
+	resticBackupperFactory restic.BackupperFactory
+	resticTimeout          time.Duration
}
type itemKey struct {
@@ -73,19 +79,33 @@ func (i *itemKey) String() string {
return fmt.Sprintf("resource=%s,namespace=%s,name=%s", i.resource, i.namespace, i.name)
}
func cohabitatingResources() map[string]*cohabitatingResource {
return map[string]*cohabitatingResource{
"deployments": newCohabitatingResource("deployments", "extensions", "apps"),
"daemonsets": newCohabitatingResource("daemonsets", "extensions", "apps"),
"replicasets": newCohabitatingResource("replicasets", "extensions", "apps"),
"networkpolicies": newCohabitatingResource("networkpolicies", "extensions", "networking.k8s.io"),
"events": newCohabitatingResource("events", "", "events.k8s.io"),
}
}
// NewKubernetesBackupper creates a new kubernetesBackupper.
func NewKubernetesBackupper(
discoveryHelper discovery.Helper,
dynamicFactory client.DynamicFactory,
-	podCommandExecutor podCommandExecutor,
+	podCommandExecutor podexec.PodCommandExecutor,
snapshotService cloudprovider.SnapshotService,
resticBackupperFactory restic.BackupperFactory,
resticTimeout time.Duration,
) (Backupper, error) {
return &kubernetesBackupper{
-		discoveryHelper:       discoveryHelper,
-		dynamicFactory:        dynamicFactory,
-		podCommandExecutor:    podCommandExecutor,
-		groupBackupperFactory: &defaultGroupBackupperFactory{},
-		snapshotService:       snapshotService,
+		discoveryHelper:        discoveryHelper,
+		dynamicFactory:         dynamicFactory,
+		podCommandExecutor:     podCommandExecutor,
+		groupBackupperFactory:  &defaultGroupBackupperFactory{},
+		snapshotService:        snapshotService,
+		resticBackupperFactory: resticBackupperFactory,
+		resticTimeout:          resticTimeout,
}, nil
}
@@ -222,40 +242,51 @@ func (kb *kubernetesBackupper) Backup(backup *api.Backup, backupFile, logFile io
return err
}
var labelSelector string
if backup.Spec.LabelSelector != nil {
labelSelector = metav1.FormatLabelSelector(backup.Spec.LabelSelector)
}
backedUpItems := make(map[itemKey]struct{})
var errs []error
cohabitatingResources := map[string]*cohabitatingResource{
"deployments": newCohabitatingResource("deployments", "extensions", "apps"),
"networkpolicies": newCohabitatingResource("networkpolicies", "extensions", "networking.k8s.io"),
"events": newCohabitatingResource("events", "", "events.k8s.io"),
}
resolvedActions, err := resolveActions(actions, kb.discoveryHelper)
if err != nil {
return err
}
podVolumeTimeout := kb.resticTimeout
if val := backup.Annotations[api.PodVolumeOperationTimeoutAnnotation]; val != "" {
parsed, err := time.ParseDuration(val)
if err != nil {
log.WithError(errors.WithStack(err)).Errorf("Unable to parse pod volume timeout annotation %s, using server value.", val)
} else {
podVolumeTimeout = parsed
}
}
ctx, cancelFunc := context.WithTimeout(context.Background(), podVolumeTimeout)
defer cancelFunc()
var resticBackupper restic.Backupper
if kb.resticBackupperFactory != nil {
resticBackupper, err = kb.resticBackupperFactory.NewBackupper(ctx, backup)
if err != nil {
return errors.WithStack(err)
}
}
gb := kb.groupBackupperFactory.newGroupBackupper(
log,
backup,
namespaceIncludesExcludes,
resourceIncludesExcludes,
labelSelector,
kb.dynamicFactory,
kb.discoveryHelper,
backedUpItems,
-		cohabitatingResources,
+		cohabitatingResources(),
resolvedActions,
kb.podCommandExecutor,
tw,
resourceHooks,
kb.snapshotService,
resticBackupper,
newPVCSnapshotTracker(),
)
for _, group := range kb.discoveryHelper.Resources() {


@@ -20,9 +20,9 @@ import (
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/util/collections"
)
@@ -36,8 +36,6 @@ func NewBackupPVAction(log logrus.FieldLogger) ItemAction {
return &backupPVAction{log: log}
}
var pvGroupResource = schema.GroupResource{Group: "", Resource: "persistentvolumes"}
func (a *backupPVAction) AppliesTo() (ResourceSelector, error) {
return ResourceSelector{
IncludedResources: []string{"persistentvolumeclaims"},
@@ -63,7 +61,7 @@ func (a *backupPVAction) Execute(item runtime.Unstructured, backup *v1.Backup) (
}
additionalItems = append(additionalItems, ResourceIdentifier{
-		GroupResource: pvGroupResource,
+		GroupResource: kuberesource.PersistentVolumes,
Name: volumeName,
})


@@ -20,6 +20,7 @@ import (
"testing"
"github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/kuberesource"
arktest "github.com/heptio/ark/pkg/util/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -56,5 +57,5 @@ func TestBackupPVAction(t *testing.T) {
_, additional, err = a.Execute(pvc, backup)
require.NoError(t, err)
require.Len(t, additional, 1)
-	assert.Equal(t, ResourceIdentifier{GroupResource: pvGroupResource, Name: "myVolume"}, additional[0])
+	assert.Equal(t, ResourceIdentifier{GroupResource: kuberesource.PersistentVolumes, Name: "myVolume"}, additional[0])
}


@@ -19,7 +19,6 @@ package backup
import (
"bytes"
"compress/gzip"
"encoding/json"
"io"
"reflect"
"sort"
@@ -43,6 +42,8 @@ import (
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/collections"
kubeutil "github.com/heptio/ark/pkg/util/kube"
arktest "github.com/heptio/ark/pkg/util/test"
@@ -505,7 +506,7 @@ func TestBackup(t *testing.T) {
dynamicFactory := &arktest.FakeDynamicFactory{}
-	podCommandExecutor := &mockPodCommandExecutor{}
+	podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
b, err := NewKubernetesBackupper(
@@ -513,6 +514,8 @@ func TestBackup(t *testing.T) {
dynamicFactory,
podCommandExecutor,
nil,
nil, // restic backupper factory
0, // restic timeout
)
require.NoError(t, err)
kb := b.(*kubernetesBackupper)
@@ -524,27 +527,22 @@ func TestBackup(t *testing.T) {
groupBackupper := &mockGroupBackupper{}
defer groupBackupper.AssertExpectations(t)
cohabitatingResources := map[string]*cohabitatingResource{
"deployments": newCohabitatingResource("deployments", "extensions", "apps"),
"networkpolicies": newCohabitatingResource("networkpolicies", "extensions", "networking.k8s.io"),
"events": newCohabitatingResource("events", "", "events.k8s.io"),
}
groupBackupperFactory.On("newGroupBackupper",
mock.Anything, // log
test.backup,
test.expectedNamespaces,
test.expectedResources,
test.expectedLabelSelector,
dynamicFactory,
discoveryHelper,
map[itemKey]struct{}{}, // backedUpItems
-		cohabitatingResources,
+		cohabitatingResources(),
mock.Anything,
kb.podCommandExecutor,
mock.Anything, // tarWriter
test.expectedHooks,
mock.Anything,
mock.Anything, // restic backupper
mock.Anything, // pvc snapshot tracker
).Return(groupBackupper)
for group, err := range test.backupGroupErrors {
@@ -577,6 +575,81 @@ func TestBackup(t *testing.T) {
}
}
func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) {
discoveryHelper := &arktest.FakeDiscoveryHelper{
Mapper: &arktest.FakeMapper{
Resources: map[schema.GroupVersionResource]schema.GroupVersionResource{},
},
}
b, err := NewKubernetesBackupper(discoveryHelper, nil, nil, nil, nil, 0)
require.NoError(t, err)
kb := b.(*kubernetesBackupper)
groupBackupperFactory := &mockGroupBackupperFactory{}
kb.groupBackupperFactory = groupBackupperFactory
// assert that newGroupBackupper() is called with the result of cohabitatingResources()
// passed as an argument.
firstCohabitatingResources := cohabitatingResources()
groupBackupperFactory.On("newGroupBackupper",
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
discoveryHelper,
mock.Anything,
firstCohabitatingResources,
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
).Return(&mockGroupBackupper{})
assert.NoError(t, b.Backup(&v1.Backup{}, &bytes.Buffer{}, &bytes.Buffer{}, nil))
groupBackupperFactory.AssertExpectations(t)
// mutate the cohabitatingResources map that was used in the first backup to simulate
// the first backup process having done so.
for _, value := range firstCohabitatingResources {
value.seen = true
}
// assert that on a second backup, newGroupBackupper() is called with the result of
// cohabitatingResources() passed as an argument, that the value is not the
// same as the mutated firstCohabitatingResources value, and that all of the `seen`
// flags are false as they should be for a new instance
secondCohabitatingResources := cohabitatingResources()
groupBackupperFactory.On("newGroupBackupper",
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
discoveryHelper,
mock.Anything,
secondCohabitatingResources,
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
mock.Anything,
).Return(&mockGroupBackupper{})
assert.NoError(t, b.Backup(&v1.Backup{}, &bytes.Buffer{}, &bytes.Buffer{}, nil))
assert.NotEqual(t, firstCohabitatingResources, secondCohabitatingResources)
for _, resource := range secondCohabitatingResources {
assert.False(t, resource.seen)
}
groupBackupperFactory.AssertExpectations(t)
}
type mockGroupBackupperFactory struct {
mock.Mock
}
@@ -585,23 +658,23 @@ func (f *mockGroupBackupperFactory) newGroupBackupper(
log logrus.FieldLogger,
backup *v1.Backup,
namespaces, resources *collections.IncludesExcludes,
labelSelector string,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
backedUpItems map[itemKey]struct{},
cohabitatingResources map[string]*cohabitatingResource,
actions []resolvedAction,
-	podCommandExecutor podCommandExecutor,
+	podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
resticSnapshotTracker *pvcSnapshotTracker,
) groupBackupper {
args := f.Called(
log,
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
@@ -611,6 +684,8 @@ func (f *mockGroupBackupperFactory) newGroupBackupper(
tarWriter,
resourceHooks,
snapshotService,
resticBackupper,
resticSnapshotTracker,
)
return args.Get(0).(groupBackupper)
}
@@ -624,26 +699,12 @@ func (gb *mockGroupBackupper) backupGroup(group *metav1.APIResourceList) error {
return args.Error(0)
}
func getAsMap(j string) (map[string]interface{}, error) {
m := make(map[string]interface{})
err := json.Unmarshal([]byte(j), &m)
return m, err
}
func toRuntimeObject(t *testing.T, data string) runtime.Object {
o, _, err := unstructured.UnstructuredJSONScheme.Decode([]byte(data), nil, nil)
require.NoError(t, err)
return o
}
func unstructuredOrDie(data string) *unstructured.Unstructured {
o, _, err := unstructured.UnstructuredJSONScheme.Decode([]byte(data), nil, nil)
if err != nil {
panic(err)
}
return o.(*unstructured.Unstructured)
}
func TestGetResourceHook(t *testing.T) {
tests := []struct {
name string


@@ -31,6 +31,8 @@ import (
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/collections"
)
@@ -39,16 +41,17 @@ type groupBackupperFactory interface {
log logrus.FieldLogger,
backup *v1.Backup,
namespaces, resources *collections.IncludesExcludes,
labelSelector string,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
backedUpItems map[itemKey]struct{},
cohabitatingResources map[string]*cohabitatingResource,
actions []resolvedAction,
-		podCommandExecutor podCommandExecutor,
+		podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
resticSnapshotTracker *pvcSnapshotTracker,
) groupBackupper
}
@@ -58,23 +61,23 @@ func (f *defaultGroupBackupperFactory) newGroupBackupper(
log logrus.FieldLogger,
backup *v1.Backup,
namespaces, resources *collections.IncludesExcludes,
labelSelector string,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
backedUpItems map[itemKey]struct{},
cohabitatingResources map[string]*cohabitatingResource,
actions []resolvedAction,
-	podCommandExecutor podCommandExecutor,
+	podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
resticSnapshotTracker *pvcSnapshotTracker,
) groupBackupper {
return &defaultGroupBackupper{
log: log,
backup: backup,
namespaces: namespaces,
resources: resources,
labelSelector: labelSelector,
dynamicFactory: dynamicFactory,
discoveryHelper: discoveryHelper,
backedUpItems: backedUpItems,
@@ -84,6 +87,8 @@ func (f *defaultGroupBackupperFactory) newGroupBackupper(
tarWriter: tarWriter,
resourceHooks: resourceHooks,
snapshotService: snapshotService,
resticBackupper: resticBackupper,
resticSnapshotTracker: resticSnapshotTracker,
resourceBackupperFactory: &defaultResourceBackupperFactory{},
}
}
@@ -96,16 +101,17 @@ type defaultGroupBackupper struct {
log logrus.FieldLogger
backup *v1.Backup
namespaces, resources *collections.IncludesExcludes
labelSelector string
dynamicFactory client.DynamicFactory
discoveryHelper discovery.Helper
backedUpItems map[itemKey]struct{}
cohabitatingResources map[string]*cohabitatingResource
actions []resolvedAction
-	podCommandExecutor podCommandExecutor
+	podCommandExecutor podexec.PodCommandExecutor
tarWriter tarWriter
resourceHooks []resourceHook
snapshotService cloudprovider.SnapshotService
resticBackupper restic.Backupper
resticSnapshotTracker *pvcSnapshotTracker
resourceBackupperFactory resourceBackupperFactory
}
@@ -119,7 +125,6 @@ func (gb *defaultGroupBackupper) backupGroup(group *metav1.APIResourceList) erro
gb.backup,
gb.namespaces,
gb.resources,
gb.labelSelector,
gb.dynamicFactory,
gb.discoveryHelper,
gb.backedUpItems,
@@ -129,6 +134,8 @@ func (gb *defaultGroupBackupper) backupGroup(group *metav1.APIResourceList) erro
gb.tarWriter,
gb.resourceHooks,
gb.snapshotService,
gb.resticBackupper,
gb.resticSnapshotTracker,
)
)


@@ -23,6 +23,8 @@ import (
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/collections"
arktest "github.com/heptio/ark/pkg/util/test"
"github.com/sirupsen/logrus"
@@ -38,7 +40,6 @@ func TestBackupGroup(t *testing.T) {
namespaces := collections.NewIncludesExcludes().Includes("a")
resources := collections.NewIncludesExcludes().Includes("b")
labelSelector := "foo=bar"
dynamicFactory := &arktest.FakeDynamicFactory{}
defer dynamicFactory.AssertExpectations(t)
@@ -64,7 +65,7 @@ func TestBackupGroup(t *testing.T) {
},
}
-	podCommandExecutor := &mockPodCommandExecutor{}
+	podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
tarWriter := &fakeTarWriter{}
@@ -78,7 +79,6 @@ func TestBackupGroup(t *testing.T) {
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
@@ -87,7 +87,9 @@ func TestBackupGroup(t *testing.T) {
podCommandExecutor,
tarWriter,
resourceHooks,
-		nil,
+		nil, // snapshot service
+		nil, // restic backupper
+		newPVCSnapshotTracker(),
).(*defaultGroupBackupper)
resourceBackupperFactory := &mockResourceBackupperFactory{}
@@ -102,7 +104,6 @@ func TestBackupGroup(t *testing.T) {
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
@@ -112,6 +113,8 @@ func TestBackupGroup(t *testing.T) {
tarWriter,
resourceHooks,
nil,
mock.Anything, // restic backupper
mock.Anything, // pvc snapshot tracker
).Return(resourceBackupper)
group := &metav1.APIResourceList{
@@ -150,23 +153,23 @@ func (rbf *mockResourceBackupperFactory) newResourceBackupper(
backup *v1.Backup,
namespaces *collections.IncludesExcludes,
resources *collections.IncludesExcludes,
labelSelector string,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
backedUpItems map[itemKey]struct{},
cohabitatingResources map[string]*cohabitatingResource,
actions []resolvedAction,
-	podCommandExecutor podCommandExecutor,
+	podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
resticSnapshotTracker *pvcSnapshotTracker,
) resourceBackupper {
args := rbf.Called(
log,
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
@@ -176,6 +179,8 @@ func (rbf *mockResourceBackupperFactory) newResourceBackupper(
tarWriter,
resourceHooks,
snapshotService,
resticBackupper,
resticSnapshotTracker,
)
return args.Get(0).(resourceBackupper)
}


@@ -25,16 +25,21 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
corev1api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kubeerrs "k8s.io/apimachinery/pkg/util/errors"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/collections"
"github.com/heptio/ark/pkg/util/logging"
)
@@ -45,12 +50,14 @@ type itemBackupperFactory interface {
namespaces, resources *collections.IncludesExcludes,
backedUpItems map[itemKey]struct{},
actions []resolvedAction,
-		podCommandExecutor podCommandExecutor,
+		podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
resticSnapshotTracker *pvcSnapshotTracker,
) ItemBackupper
}
@@ -61,12 +68,14 @@ func (f *defaultItemBackupperFactory) newItemBackupper(
namespaces, resources *collections.IncludesExcludes,
backedUpItems map[itemKey]struct{},
actions []resolvedAction,
-	podCommandExecutor podCommandExecutor,
+	podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
resticSnapshotTracker *pvcSnapshotTracker,
) ItemBackupper {
ib := &defaultItemBackupper{
backup: backup,
@@ -82,6 +91,8 @@ func (f *defaultItemBackupperFactory) newItemBackupper(
itemHookHandler: &defaultItemHookHandler{
podCommandExecutor: podCommandExecutor,
},
resticBackupper: resticBackupper,
resticSnapshotTracker: resticSnapshotTracker,
}
// this is for testing purposes
@@ -95,24 +106,23 @@ type ItemBackupper interface {
}
type defaultItemBackupper struct {
-	backup          *api.Backup
-	namespaces      *collections.IncludesExcludes
-	resources       *collections.IncludesExcludes
-	backedUpItems   map[itemKey]struct{}
-	actions         []resolvedAction
-	tarWriter       tarWriter
-	resourceHooks   []resourceHook
-	dynamicFactory  client.DynamicFactory
-	discoveryHelper discovery.Helper
-	snapshotService cloudprovider.SnapshotService
+	backup                *api.Backup
+	namespaces            *collections.IncludesExcludes
+	resources             *collections.IncludesExcludes
+	backedUpItems         map[itemKey]struct{}
+	actions               []resolvedAction
+	tarWriter             tarWriter
+	resourceHooks         []resourceHook
+	dynamicFactory        client.DynamicFactory
+	discoveryHelper       discovery.Helper
+	snapshotService       cloudprovider.SnapshotService
+	resticBackupper       restic.Backupper
+	resticSnapshotTracker *pvcSnapshotTracker
itemHookHandler itemHookHandler
additionalItemBackupper ItemBackupper
}
var podsGroupResource = schema.GroupResource{Group: "", Resource: "pods"}
var namespacesGroupResource = schema.GroupResource{Group: "", Resource: "namespaces"}
// backupItem backs up an individual item to tarWriter. The item may be excluded based on the
// namespaces IncludesExcludes list.
func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) error {
@@ -138,7 +148,7 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim
// NOTE: we specifically allow namespaces to be backed up even if IncludeClusterResources is
// false.
-	if namespace == "" && groupResource != namespacesGroupResource && ib.backup.Spec.IncludeClusterResources != nil && !*ib.backup.Spec.IncludeClusterResources {
+	if namespace == "" && groupResource != kuberesource.Namespaces && ib.backup.Spec.IncludeClusterResources != nil && !*ib.backup.Spec.IncludeClusterResources {
log.Info("Excluding item because resource is cluster-scoped and backup.spec.includeClusterResources is false")
return nil
}
@@ -148,6 +158,10 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim
return nil
}
if metadata.GetDeletionTimestamp() != nil {
log.Info("Skipping item because it's being deleted.")
return nil
}
key := itemKey{
resource: groupResource.String(),
namespace: namespace,
@@ -162,80 +176,77 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim
log.Info("Backing up resource")
// Never save status
delete(obj.UnstructuredContent(), "status")
log.Debug("Executing pre hooks")
if err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.resourceHooks, hookPhasePre); err != nil {
return err
}
for _, action := range ib.actions {
if !action.resourceIncludesExcludes.ShouldInclude(groupResource.String()) {
log.Debug("Skipping action because it does not apply to this resource")
continue
}
var (
backupErrs []error
pod *corev1api.Pod
resticVolumesToBackup []string
)
if namespace != "" && !action.namespaceIncludesExcludes.ShouldInclude(namespace) {
log.Debug("Skipping action because it does not apply to this namespace")
continue
}
if !action.selector.Matches(labels.Set(metadata.GetLabels())) {
log.Debug("Skipping action because label selector does not match")
continue
}
log.Info("Executing custom action")
if logSetter, ok := action.ItemAction.(logging.LogSetter); ok {
logSetter.SetLog(log)
}
if updatedItem, additionalItemIdentifiers, err := action.Execute(obj, ib.backup); err == nil {
obj = updatedItem
for _, additionalItem := range additionalItemIdentifiers {
gvr, resource, err := ib.discoveryHelper.ResourceFor(additionalItem.GroupResource.WithVersion(""))
if err != nil {
return err
}
client, err := ib.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), resource, additionalItem.Namespace)
if err != nil {
return err
}
additionalItem, err := client.Get(additionalItem.Name, metav1.GetOptions{})
if err != nil {
return err
}
ib.additionalItemBackupper.backupItem(log, additionalItem, gvr.GroupResource())
}
if groupResource == kuberesource.Pods {
// pod needs to be initialized for the unstructured converter
pod = new(corev1api.Pod)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pod); err != nil {
backupErrs = append(backupErrs, errors.WithStack(err))
// nil it on error since it's not valid
pod = nil
} else {
// We want this to show up in the log file at the place where the error occurs. When we return
// the error, it get aggregated with all the other ones at the end of the backup, making it
// harder to tell when it happened.
log.WithError(err).Error("error executing custom action")
// get the volumes to backup using restic, and add any of them that are PVCs to the pvc snapshot
// tracker, so that when we backup PVCs/PVs via an item action in the next step, we don't snapshot
// PVs that will have their data backed up with restic.
resticVolumesToBackup = restic.GetVolumesToBackup(pod)
return errors.Wrapf(err, "error executing custom action (groupResource=%s, namespace=%s, name=%s)", groupResource.String(), namespace, name)
ib.resticSnapshotTracker.Track(pod, resticVolumesToBackup)
}
}
if groupResource == pvGroupResource {
updatedObj, err := ib.executeActions(log, obj, groupResource, name, namespace, metadata)
if err != nil {
log.WithError(err).Error("Error executing item actions")
backupErrs = append(backupErrs, err)
// if there was an error running actions, execute post hooks and return
log.Debug("Executing post hooks")
if err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.resourceHooks, hookPhasePost); err != nil {
backupErrs = append(backupErrs, err)
}
return kubeerrs.NewAggregate(backupErrs)
}
obj = updatedObj
if groupResource == kuberesource.PersistentVolumes {
if ib.snapshotService == nil {
log.Debug("Skipping Persistent Volume snapshot because they're not enabled.")
} else {
if err := ib.takePVSnapshot(obj, ib.backup, log); err != nil {
return err
}
} else if err := ib.takePVSnapshot(obj, ib.backup, log); err != nil {
backupErrs = append(backupErrs, err)
}
}
if groupResource == kuberesource.Pods && pod != nil {
// this function will return partial results, so process volumeSnapshots
// even if there are errors.
volumeSnapshots, errs := ib.backupPodVolumes(log, pod, resticVolumesToBackup)
// annotate the pod with the successful volume snapshots
for volume, snapshot := range volumeSnapshots {
restic.SetPodSnapshotAnnotation(metadata, volume, snapshot)
}
backupErrs = append(backupErrs, errs...)
}
log.Debug("Executing post hooks")
if err := ib.itemHookHandler.handleHooks(log, groupResource, obj, ib.resourceHooks, hookPhasePost); err != nil {
return err
backupErrs = append(backupErrs, err)
}
if len(backupErrs) != 0 {
return kubeerrs.NewAggregate(backupErrs)
}
var filePath string
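The error handling above now accumulates failures in backupErrs and flattens them with kubeerrs.NewAggregate from apimachinery rather than returning on the first error. A standalone sketch of the helper's behavior (illustrative only, not part of this change):

package main

import (
	"errors"
	"fmt"

	kubeerrs "k8s.io/apimachinery/pkg/util/errors"
)

func main() {
	var backupErrs []error

	// with no collected errors, NewAggregate returns nil, so backupItem succeeds
	fmt.Println(kubeerrs.NewAggregate(backupErrs)) // <nil>

	// with collected errors, a single aggregate error wraps all of them
	backupErrs = append(backupErrs,
		errors.New("error executing custom action"),
		errors.New("error getting volume ID for PersistentVolume"))
	fmt.Println(kubeerrs.NewAggregate(backupErrs))
	// [error executing custom action, error getting volume ID for PersistentVolume]
}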
@@ -269,6 +280,86 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim
return nil
}
// backupPodVolumes triggers restic backups of the specified pod volumes, and returns a map of volume name -> snapshot ID
// for volumes that were successfully backed up, and a slice of any errors that were encountered.
func (ib *defaultItemBackupper) backupPodVolumes(log logrus.FieldLogger, pod *corev1api.Pod, volumes []string) (map[string]string, []error) {
if len(volumes) == 0 {
return nil, nil
}
if ib.resticBackupper == nil {
log.Warn("No restic backupper, not backing up pod's volumes")
return nil, nil
}
return ib.resticBackupper.BackupPodVolumes(ib.backup, pod, log)
}
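restic.GetVolumesToBackup is referenced above but implemented outside this diff. Based on the backup.ark.heptio.com/backup-volumes pod annotation that appears in the test fixtures later in this diff, a minimal sketch might look like the following; the annotation constant and the exact parsing are assumptions, not the canonical implementation:

package restic

import (
	"strings"

	corev1api "k8s.io/api/core/v1"
)

// volumesToBackupAnnotation is assumed from the test fixtures in this diff.
const volumesToBackupAnnotation = "backup.ark.heptio.com/backup-volumes"

// GetVolumesToBackup returns the names of the pod volumes to back up with
// restic, read from a comma-separated pod annotation.
func GetVolumesToBackup(pod *corev1api.Pod) []string {
	annotation := pod.Annotations[volumesToBackupAnnotation]
	if annotation == "" {
		return nil
	}
	return strings.Split(annotation, ",")
}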
func (ib *defaultItemBackupper) executeActions(
log logrus.FieldLogger,
obj runtime.Unstructured,
groupResource schema.GroupResource,
name, namespace string,
metadata metav1.Object,
) (runtime.Unstructured, error) {
for _, action := range ib.actions {
if !action.resourceIncludesExcludes.ShouldInclude(groupResource.String()) {
log.Debug("Skipping action because it does not apply to this resource")
continue
}
if namespace != "" && !action.namespaceIncludesExcludes.ShouldInclude(namespace) {
log.Debug("Skipping action because it does not apply to this namespace")
continue
}
if !action.selector.Matches(labels.Set(metadata.GetLabels())) {
log.Debug("Skipping action because label selector does not match")
continue
}
log.Info("Executing custom action")
if logSetter, ok := action.ItemAction.(logging.LogSetter); ok {
logSetter.SetLog(log)
}
updatedItem, additionalItemIdentifiers, err := action.Execute(obj, ib.backup)
if err != nil {
// We want this to show up in the log file at the place where the error occurs. When we return
// the error, it gets aggregated with all the other ones at the end of the backup, making it
// harder to tell when it happened.
log.WithError(err).Error("error executing custom action")
return nil, errors.Wrapf(err, "error executing custom action (groupResource=%s, namespace=%s, name=%s)", groupResource.String(), namespace, name)
}
obj = updatedItem
for _, additionalItem := range additionalItemIdentifiers {
gvr, resource, err := ib.discoveryHelper.ResourceFor(additionalItem.GroupResource.WithVersion(""))
if err != nil {
return nil, err
}
client, err := ib.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), resource, additionalItem.Namespace)
if err != nil {
return nil, err
}
additionalItem, err := client.Get(additionalItem.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if err = ib.additionalItemBackupper.backupItem(log, additionalItem, gvr.GroupResource()); err != nil {
return nil, err
}
}
}
return obj, nil
}
// zoneLabel is the label that stores availability-zone info
// on PVs
const zoneLabel = "failure-domain.beta.kubernetes.io/zone"
@@ -276,7 +367,7 @@ const zoneLabel = "failure-domain.beta.kubernetes.io/zone"
// takePVSnapshot triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided
// backup has volume snapshots enabled and the PV is of a compatible type. Also records cloud
// disk type and IOPS (if applicable) to be able to restore to current state later.
func (ib *defaultItemBackupper) takePVSnapshot(pv runtime.Unstructured, backup *api.Backup, log logrus.FieldLogger) error {
func (ib *defaultItemBackupper) takePVSnapshot(obj runtime.Unstructured, backup *api.Backup, log logrus.FieldLogger) error {
log.Info("Executing takePVSnapshot")
if backup.Spec.SnapshotVolumes != nil && !*backup.Spec.SnapshotVolumes {
@@ -284,7 +375,21 @@ func (ib *defaultItemBackupper) takePVSnapshot(pv runtime.Unstructured, backup *
return nil
}
metadata, err := meta.Accessor(pv)
pv := new(corev1api.PersistentVolume)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pv); err != nil {
return errors.WithStack(err)
}
// If this PV is claimed, see if we've already taken a (restic) snapshot of the contents
// of this PV. If so, don't take a snapshot.
if pv.Spec.ClaimRef != nil {
if ib.resticSnapshotTracker.Has(pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name) {
log.Info("Skipping Persistent Volume snapshot because volume has already been backed up.")
return nil
}
}
metadata, err := meta.Accessor(obj)
if err != nil {
return errors.WithStack(err)
}
@@ -299,7 +404,7 @@ func (ib *defaultItemBackupper) takePVSnapshot(pv runtime.Unstructured, backup *
log.Infof("label %q is not present on PersistentVolume", zoneLabel)
}
volumeID, err := ib.snapshotService.GetVolumeID(pv)
volumeID, err := ib.snapshotService.GetVolumeID(obj)
if err != nil {
return errors.Wrapf(err, "error getting volume ID for PersistentVolume")
}


@@ -33,11 +33,14 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
corev1api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
)
func TestBackupItemSkips(t *testing.T) {
@@ -48,6 +51,7 @@ func TestBackupItemSkips(t *testing.T) {
namespaces *collections.IncludesExcludes
groupResource schema.GroupResource
resources *collections.IncludesExcludes
terminating bool
backedUpItems map[itemKey]struct{}
}{
{
@@ -89,17 +93,37 @@ func TestBackupItemSkips(t *testing.T) {
{resource: "bar.foo", namespace: "ns", name: "foo"}: {},
},
},
{
testName: "terminating resource",
namespace: "ns",
name: "foo",
groupResource: schema.GroupResource{Group: "foo", Resource: "bar"},
namespaces: collections.NewIncludesExcludes(),
resources: collections.NewIncludesExcludes(),
terminating: true,
},
}
for _, test := range tests {
t.Run(test.testName, func(t *testing.T) {
ib := &defaultItemBackupper{
namespaces: test.namespaces,
resources: test.resources,
backedUpItems: test.backedUpItems,
}
u := unstructuredOrDie(fmt.Sprintf(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"%s","name":"%s"}}`, test.namespace, test.name))
pod := &corev1api.Pod{
TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
ObjectMeta: metav1.ObjectMeta{Namespace: test.namespace, Name: test.name},
}
if test.terminating {
pod.ObjectMeta.DeletionTimestamp = &metav1.Time{Time: time.Now()}
}
unstructuredObj, unmarshalErr := runtime.DefaultUnstructuredConverter.ToUnstructured(pod)
require.NoError(t, unmarshalErr)
u := &unstructured.Unstructured{Object: unstructuredObj}
err := ib.backupItem(arktest.NewLogger(), u, test.groupResource)
assert.NoError(t, err)
})
@@ -118,7 +142,7 @@ func TestBackupItemSkipsClusterScopedResourceWhenIncludeClusterResourcesFalse(t
resources: collections.NewIncludesExcludes(),
}
u := unstructuredOrDie(`{"apiVersion":"v1","kind":"Foo","metadata":{"name":"bar"}}`)
u := arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Foo","metadata":{"name":"bar"}}`)
err := ib.backupItem(arktest.NewLogger(), u, schema.GroupResource{Group: "foo", Resource: "bar"})
assert.NoError(t, err)
}
@@ -139,6 +163,10 @@ func TestBackupItemNoSkips(t *testing.T) {
customActionAdditionalItems []runtime.Unstructured
groupResource string
snapshottableVolumes map[string]api.VolumeBackupInfo
snapshotError error
additionalItemError error
trackedPVCs sets.String
expectedTrackedPVCs sets.String
}{
{
name: "explicit namespace include",
@@ -163,13 +191,6 @@ func TestBackupItemNoSkips(t *testing.T) {
expectExcluded: false,
expectedTarHeaderName: "resources/resource.group/cluster/bar.json",
},
{
name: "make sure status is deleted",
item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`,
expectError: false,
expectExcluded: false,
expectedTarHeaderName: "resources/resource.group/cluster/bar.json",
},
{
name: "tar header write error",
item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`,
@@ -224,10 +245,37 @@ func TestBackupItemNoSkips(t *testing.T) {
},
},
customActionAdditionalItems: []runtime.Unstructured{
unstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`),
unstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`),
},
},
{
name: "action invoked - additional items - error",
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
item: `{"metadata":{"namespace": "myns", "name":"bar"}}`,
expectError: true,
expectExcluded: false,
expectedTarHeaderName: "resources/resource.group/namespaces/myns/bar.json",
customAction: true,
expectedActionID: "myns/bar",
customActionAdditionalItemIdentifiers: []ResourceIdentifier{
{
GroupResource: schema.GroupResource{Group: "g1", Resource: "r1"},
Namespace: "ns1",
Name: "n1",
},
{
GroupResource: schema.GroupResource{Group: "g2", Resource: "r2"},
Namespace: "ns2",
Name: "n2",
},
},
customActionAdditionalItems: []runtime.Unstructured{
arktest.UnstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`),
},
additionalItemError: errors.New("foo"),
},
{
name: "takePVSnapshot is not invoked for PVs when snapshotService == nil",
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
@@ -249,6 +297,53 @@ func TestBackupItemNoSkips(t *testing.T) {
"vol-abc123": {SnapshotID: "snapshot-1", AvailabilityZone: "us-east-1c"},
},
},
{
name: "takePVSnapshot is not invoked for PVs when their claim is tracked in the restic PVC tracker",
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
item: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"claimRef": {"namespace": "pvc-ns", "name": "pvc"}, "awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`,
expectError: false,
expectExcluded: false,
expectedTarHeaderName: "resources/persistentvolumes/cluster/mypv.json",
groupResource: "persistentvolumes",
// empty snapshottableVolumes causes a snapshotService to be created, but no
// snapshots are expected to be taken.
snapshottableVolumes: map[string]api.VolumeBackupInfo{},
trackedPVCs: sets.NewString(key("pvc-ns", "pvc"), key("another-pvc-ns", "another-pvc")),
},
{
name: "takePVSnapshot is invoked for PVs when their claim is not tracked in the restic PVC tracker",
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
item: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"claimRef": {"namespace": "pvc-ns", "name": "pvc"}, "awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`,
expectError: false,
expectExcluded: false,
expectedTarHeaderName: "resources/persistentvolumes/cluster/mypv.json",
groupResource: "persistentvolumes",
snapshottableVolumes: map[string]api.VolumeBackupInfo{
"vol-abc123": {SnapshotID: "snapshot-1", AvailabilityZone: "us-east-1c"},
},
trackedPVCs: sets.NewString(key("another-pvc-ns", "another-pvc")),
},
{
name: "backup fails when takePVSnapshot fails",
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
item: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`,
expectError: true,
groupResource: "persistentvolumes",
snapshottableVolumes: map[string]api.VolumeBackupInfo{
"vol-abc123": {SnapshotID: "snapshot-1", AvailabilityZone: "us-east-1c"},
},
snapshotError: fmt.Errorf("failure"),
},
{
name: "pod's restic PVC volume backups (only) are tracked",
item: `{"apiVersion": "v1", "kind": "Pod", "spec": {"volumes": [{"name": "volume-1", "persistentVolumeClaim": {"claimName": "bar"}},{"name": "volume-2", "persistentVolumeClaim": {"claimName": "baz"}},{"name": "volume-1", "emptyDir": {}}]}, "metadata":{"namespace":"foo","name":"bar", "annotations": {"backup.ark.heptio.com/backup-volumes": "volume-1,volume-2"}}}`,
namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"),
groupResource: "pods",
expectError: false,
expectExcluded: false,
expectedTarHeaderName: "resources/pods/namespaces/foo/bar.json",
expectedTrackedPVCs: sets.NewString(key("foo", "bar"), key("foo", "baz")),
},
}
for _, test := range tests {
@@ -267,7 +362,7 @@ func TestBackupItemNoSkips(t *testing.T) {
groupResource = schema.ParseGroupResource(test.groupResource)
}
item, err := getAsMap(test.item)
item, err := arktest.GetAsMap(test.item)
if err != nil {
t.Fatal(err)
}
@@ -300,7 +395,7 @@ func TestBackupItemNoSkips(t *testing.T) {
resourceHooks := []resourceHook{}
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
dynamicFactory := &arktest.FakeDynamicFactory{}
@@ -319,7 +414,9 @@ func TestBackupItemNoSkips(t *testing.T) {
resourceHooks,
dynamicFactory,
discoveryHelper,
nil,
nil, // snapshot service
nil, // restic backupper
newPVCSnapshotTracker(),
).(*defaultItemBackupper)
var snapshotService *arktest.FakeSnapshotService
@@ -327,10 +424,15 @@ func TestBackupItemNoSkips(t *testing.T) {
snapshotService = &arktest.FakeSnapshotService{
SnapshottableVolumes: test.snapshottableVolumes,
VolumeID: "vol-abc123",
Error: test.snapshotError,
}
b.snapshotService = snapshotService
}
if test.trackedPVCs != nil {
b.resticSnapshotTracker.pvcs = test.trackedPVCs
}
// make sure the podCommandExecutor was set correctly in the real hook handler
assert.Equal(t, podCommandExecutor, b.itemHookHandler.(*defaultItemHookHandler).podCommandExecutor)
@@ -347,6 +449,9 @@ func TestBackupItemNoSkips(t *testing.T) {
itemHookHandler.On("handleHooks", mock.Anything, groupResource, obj, resourceHooks, hookPhasePost).Return(nil)
for i, item := range test.customActionAdditionalItemIdentifiers {
if test.additionalItemError != nil && i > 0 {
break
}
itemClient := &arktest.FakeDynamicClient{}
defer itemClient.AssertExpectations(t)
@@ -354,7 +459,7 @@ func TestBackupItemNoSkips(t *testing.T) {
itemClient.On("Get", item.Name, metav1.GetOptions{}).Return(test.customActionAdditionalItems[i], nil)
additionalItemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), test.customActionAdditionalItems[i], item.GroupResource).Return(nil)
additionalItemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), test.customActionAdditionalItems[i], item.GroupResource).Return(test.additionalItemError)
}
err = b.backupItem(arktest.NewLogger(), obj, groupResource)
@@ -376,23 +481,20 @@ func TestBackupItemNoSkips(t *testing.T) {
return
}
// we have to delete status as that's what backupItem does,
// and this ensures that we're verifying the right data
delete(item, "status")
itemWithoutStatus, err := json.Marshal(&item)
// Convert to JSON for comparing number of bytes to the tar header
itemJSON, err := json.Marshal(&item)
if err != nil {
t.Fatal(err)
}
require.Equal(t, 1, len(w.headers), "headers")
assert.Equal(t, test.expectedTarHeaderName, w.headers[0].Name, "header.name")
assert.Equal(t, int64(len(itemWithoutStatus)), w.headers[0].Size, "header.size")
assert.Equal(t, int64(len(itemJSON)), w.headers[0].Size, "header.size")
assert.Equal(t, byte(tar.TypeReg), w.headers[0].Typeflag, "header.typeflag")
assert.Equal(t, int64(0755), w.headers[0].Mode, "header.mode")
assert.False(t, w.headers[0].ModTime.IsZero(), "header.modTime set")
assert.Equal(t, 1, len(w.data), "# of data")
actual, err := getAsMap(string(w.data[0]))
actual, err := arktest.GetAsMap(string(w.data[0]))
if err != nil {
t.Fatal(err)
}
@@ -412,7 +514,7 @@ func TestBackupItemNoSkips(t *testing.T) {
}
if test.snapshottableVolumes != nil {
require.Equal(t, 1, len(snapshotService.SnapshotsTaken))
require.Equal(t, len(test.snapshottableVolumes), len(snapshotService.SnapshotsTaken))
var expectedBackups []api.VolumeBackupInfo
for _, vbi := range test.snapshottableVolumes {
@@ -426,10 +528,96 @@ func TestBackupItemNoSkips(t *testing.T) {
assert.Equal(t, expectedBackups, actualBackups)
}
if test.expectedTrackedPVCs != nil {
require.Equal(t, len(test.expectedTrackedPVCs), len(b.resticSnapshotTracker.pvcs))
for key := range test.expectedTrackedPVCs {
assert.True(t, b.resticSnapshotTracker.pvcs.Has(key))
}
}
})
}
}
type addAnnotationAction struct{}
func (a *addAnnotationAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runtime.Unstructured, []ResourceIdentifier, error) {
// since item actions run out-of-proc, do a deep-copy here to simulate passing data
// across a process boundary.
copy := item.(*unstructured.Unstructured).DeepCopy()
metadata, err := meta.Accessor(copy)
if err != nil {
return copy, nil, nil
}
annotations := metadata.GetAnnotations()
if annotations == nil {
annotations = make(map[string]string)
}
annotations["foo"] = "bar"
metadata.SetAnnotations(annotations)
return copy, nil, nil
}
func (a *addAnnotationAction) AppliesTo() (ResourceSelector, error) {
panic("not implemented")
}
func TestItemActionModificationsToItemPersist(t *testing.T) {
var (
w = &fakeTarWriter{}
obj = &unstructured.Unstructured{
Object: map[string]interface{}{
"metadata": map[string]interface{}{
"namespace": "myns",
"name": "bar",
},
},
}
actions = []resolvedAction{
{
ItemAction: &addAnnotationAction{},
namespaceIncludesExcludes: collections.NewIncludesExcludes(),
resourceIncludesExcludes: collections.NewIncludesExcludes(),
selector: labels.Everything(),
},
}
b = (&defaultItemBackupperFactory{}).newItemBackupper(
&v1.Backup{},
collections.NewIncludesExcludes(),
collections.NewIncludesExcludes(),
make(map[itemKey]struct{}),
actions,
nil,
w,
nil,
&arktest.FakeDynamicFactory{},
arktest.NewFakeDiscoveryHelper(true, nil),
nil,
nil,
newPVCSnapshotTracker(),
).(*defaultItemBackupper)
)
// our expected backed-up object is the passed-in object plus the annotation
// that the backup item action adds.
expected := obj.DeepCopy()
expected.SetAnnotations(map[string]string{"foo": "bar"})
// method under test
require.NoError(t, b.backupItem(arktest.NewLogger(), obj, schema.ParseGroupResource("resource.group")))
// get the actual backed-up item
require.Len(t, w.data, 1)
actual, err := arktest.GetAsMap(string(w.data[0]))
require.NoError(t, err)
assert.EqualValues(t, expected.Object, actual)
}
func TestTakePVSnapshot(t *testing.T) {
iops := int64(1000)
@@ -538,7 +726,7 @@ func TestTakePVSnapshot(t *testing.T) {
ib := &defaultItemBackupper{snapshotService: snapshotService}
pv, err := getAsMap(test.pv)
pv, err := arktest.GetAsMap(test.pv)
if err != nil {
t.Fatal(err)
}


@@ -21,15 +21,19 @@ import (
"fmt"
"time"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/util/collections"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/util/collections"
)
type hookPhase string
@@ -56,7 +60,7 @@ type itemHookHandler interface {
// defaultItemHookHandler is the default itemHookHandler.
type defaultItemHookHandler struct {
podCommandExecutor podCommandExecutor
podCommandExecutor podexec.PodCommandExecutor
}
func (h *defaultItemHookHandler) handleHooks(
@@ -67,7 +71,7 @@ func (h *defaultItemHookHandler) handleHooks(
phase hookPhase,
) error {
// We only support hooks on pods right now
if groupResource != podsGroupResource {
if groupResource != kuberesource.Pods {
return nil
}
@@ -93,7 +97,7 @@ func (h *defaultItemHookHandler) handleHooks(
"hookPhase": phase,
},
)
if err := h.podCommandExecutor.executePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, "<from-annotation>", hookFromAnnotations); err != nil {
if err := h.podCommandExecutor.ExecutePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, "<from-annotation>", hookFromAnnotations); err != nil {
hookLog.WithError(err).Error("Error executing hook")
if hookFromAnnotations.OnError == api.HookErrorModeFail {
return err
@@ -117,7 +121,7 @@ func (h *defaultItemHookHandler) handleHooks(
hooks = resourceHook.post
}
for _, hook := range hooks {
if groupResource == podsGroupResource {
if groupResource == kuberesource.Pods {
if hook.Exec != nil {
hookLog := log.WithFields(
logrus.Fields{
@@ -126,7 +130,7 @@ func (h *defaultItemHookHandler) handleHooks(
"hookPhase": phase,
},
)
err := h.podCommandExecutor.executePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, resourceHook.name, hook.Exec)
err := h.podCommandExecutor.ExecutePodCommand(hookLog, obj.UnstructuredContent(), namespace, name, resourceHook.name, hook.Exec)
if err != nil {
hookLog.WithError(err).Error("Error executing hook")
if hook.Exec.OnError == api.HookErrorModeFail {
@@ -146,8 +150,6 @@ const (
podBackupHookCommandAnnotationKey = "hook.backup.ark.heptio.com/command"
podBackupHookOnErrorAnnotationKey = "hook.backup.ark.heptio.com/on-error"
podBackupHookTimeoutAnnotationKey = "hook.backup.ark.heptio.com/timeout"
defaultHookOnError = api.HookErrorModeFail
defaultHookTimeout = 30 * time.Second
)
func phasedKey(phase hookPhase, key string) string {
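For reference, a pod can opt into backup hooks purely through the annotation keys defined above. A sketch of plausible values follows; the JSON-array command form, the "Fail" error mode, and the duration-style timeout are assumptions about the expected formats:

package main

import "fmt"

func main() {
	// hypothetical annotation-driven hook configuration, using only the keys
	// from the const block above; exact value formats are assumptions
	annotations := map[string]string{
		"hook.backup.ark.heptio.com/command":  `["/bin/fsfreeze", "--freeze", "/var/lib/data"]`, // assumed JSON-array form
		"hook.backup.ark.heptio.com/on-error": "Fail",                                           // assumed to match api.HookErrorModeFail
		"hook.backup.ark.heptio.com/timeout":  "30s",                                            // matches the removed defaultHookTimeout
	}
	for k, v := range annotations {
		fmt.Printf("%s=%s\n", k, v)
	}
}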


@@ -57,7 +57,7 @@ func TestHandleHooksSkips(t *testing.T) {
},
{
name: "pod without annotation / no spec hooks",
item: unstructuredOrDie(
item: arktest.UnstructuredOrDie(
`
{
"apiVersion": "v1",
@@ -73,7 +73,7 @@ func TestHandleHooksSkips(t *testing.T) {
{
name: "spec hooks not applicable",
groupResource: "pods",
item: unstructuredOrDie(
item: arktest.UnstructuredOrDie(
`
{
"apiVersion": "v1",
@@ -114,7 +114,7 @@ func TestHandleHooksSkips(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
h := &defaultItemHookHandler{
@@ -144,7 +144,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, no annotation, spec (multiple pre hooks) = run spec",
phase: hookPhasePre,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@@ -194,7 +194,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, no annotation, spec (multiple post hooks) = run spec",
phase: hookPhasePost,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@@ -244,7 +244,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, annotation (legacy), no spec = run annotation",
phase: hookPhasePre,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@@ -266,7 +266,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, annotation (pre), no spec = run annotation",
phase: hookPhasePre,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@@ -288,7 +288,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, annotation (post), no spec = run annotation",
phase: hookPhasePost,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@@ -310,7 +310,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, annotation & spec = run annotation",
phase: hookPhasePre,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@@ -345,7 +345,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, annotation, onError=fail = return error",
phase: hookPhasePre,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@@ -371,7 +371,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, annotation, onError=continue = return nil",
phase: hookPhasePre,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@@ -397,7 +397,7 @@ func TestHandleHooks(t *testing.T) {
name: "pod, spec, onError=fail = don't run other hooks",
phase: hookPhasePre,
groupResource: "pods",
item: unstructuredOrDie(`
item: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@@ -459,7 +459,7 @@ func TestHandleHooks(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
h := &defaultItemHookHandler{
@@ -467,20 +467,20 @@ func TestHandleHooks(t *testing.T) {
}
if test.expectedPodHook != nil {
podCommandExecutor.On("executePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", "<from-annotation>", test.expectedPodHook).Return(test.expectedPodHookError)
podCommandExecutor.On("ExecutePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", "<from-annotation>", test.expectedPodHook).Return(test.expectedPodHookError)
} else {
hookLoop:
for _, resourceHook := range test.hooks {
for _, hook := range resourceHook.pre {
hookError := test.hookErrorsByContainer[hook.Exec.Container]
podCommandExecutor.On("executePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", resourceHook.name, hook.Exec).Return(hookError)
podCommandExecutor.On("ExecutePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", resourceHook.name, hook.Exec).Return(hookError)
if hookError != nil && hook.Exec.OnError == v1.HookErrorModeFail {
break hookLoop
}
}
for _, hook := range resourceHook.post {
hookError := test.hookErrorsByContainer[hook.Exec.Container]
podCommandExecutor.On("executePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", resourceHook.name, hook.Exec).Return(hookError)
podCommandExecutor.On("ExecutePodCommand", mock.Anything, test.item.UnstructuredContent(), "ns", "name", resourceHook.name, hook.Exec).Return(hookError)
if hookError != nil && hook.Exec.OnError == v1.HookErrorModeFail {
break hookLoop
}


@@ -22,9 +22,9 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/util/collections"
)
@@ -38,8 +38,6 @@ func NewPodAction(log logrus.FieldLogger) ItemAction {
return &podAction{log: log}
}
var pvcGroupResource = schema.GroupResource{Group: "", Resource: "persistentvolumeclaims"}
// AppliesTo returns a ResourceSelector that applies only to pods.
func (a *podAction) AppliesTo() (ResourceSelector, error) {
return ResourceSelector{
@@ -92,7 +90,7 @@ func (a *podAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runti
a.log.Infof("Adding pvc %s to additionalItems", claimName)
additionalItems = append(additionalItems, ResourceIdentifier{
GroupResource: pvcGroupResource,
GroupResource: kuberesource.PersistentVolumeClaims,
Namespace: metadata.GetNamespace(),
Name: claimName,
})


@@ -19,6 +19,7 @@ package backup
import (
"testing"
"github.com/heptio/ark/pkg/kuberesource"
arktest "github.com/heptio/ark/pkg/util/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -45,7 +46,7 @@ func TestPodActionExecute(t *testing.T) {
}{
{
name: "no spec.volumes",
pod: unstructuredOrDie(`
pod: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@@ -58,7 +59,7 @@ func TestPodActionExecute(t *testing.T) {
},
{
name: "persistentVolumeClaim without claimName",
pod: unstructuredOrDie(`
pod: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@@ -78,7 +79,7 @@ func TestPodActionExecute(t *testing.T) {
},
{
name: "full test, mix of volume types",
pod: unstructuredOrDie(`
pod: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "Pod",
@@ -108,8 +109,8 @@ func TestPodActionExecute(t *testing.T) {
}
`),
expected: []ResourceIdentifier{
{GroupResource: pvcGroupResource, Namespace: "foo", Name: "claim1"},
{GroupResource: pvcGroupResource, Namespace: "foo", Name: "claim2"},
{GroupResource: kuberesource.PersistentVolumeClaims, Namespace: "foo", Name: "claim1"},
{GroupResource: kuberesource.PersistentVolumeClaims, Namespace: "foo", Name: "claim2"},
},
},
}


@@ -0,0 +1,61 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"fmt"
corev1api "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
)
// pvcSnapshotTracker keeps track of persistent volume claims that have been snapshotted
// with restic.
type pvcSnapshotTracker struct {
pvcs sets.String
}
func newPVCSnapshotTracker() *pvcSnapshotTracker {
return &pvcSnapshotTracker{
pvcs: sets.NewString(),
}
}
// Track takes a pod and a list of volumes from that pod that were snapshotted, and
// tracks each snapshotted volume that's a PVC.
func (t *pvcSnapshotTracker) Track(pod *corev1api.Pod, snapshottedVolumes []string) {
for _, volumeName := range snapshottedVolumes {
// if the volume is a PVC, track it
for _, volume := range pod.Spec.Volumes {
if volume.Name == volumeName {
if volume.PersistentVolumeClaim != nil {
t.pvcs.Insert(key(pod.Namespace, volume.PersistentVolumeClaim.ClaimName))
}
break
}
}
}
}
// Has returns true if the PVC with the specified namespace and name has been tracked.
func (t *pvcSnapshotTracker) Has(namespace, name string) bool {
return t.pvcs.Has(key(namespace, name))
}
func key(namespace, name string) string {
return fmt.Sprintf("%s/%s", namespace, name)
}
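Tying this back to the item backupper changes above: Track is called when a pod's volumes are backed up with restic, and Has is consulted before snapshotting a claimed PV. A minimal sketch of that decision, using a hypothetical helper in the same package:

package backup

import (
	corev1api "k8s.io/api/core/v1"

	"github.com/heptio/ark/pkg/restic"
)

// shouldSnapshotPV is a hypothetical helper illustrating the tracker's use:
// a claimed PV is skipped when its PVC was already backed up with restic.
func shouldSnapshotPV(tracker *pvcSnapshotTracker, pod *corev1api.Pod, pv *corev1api.PersistentVolume) bool {
	// when the pod is backed up, record its restic-backed PVC volumes
	tracker.Track(pod, restic.GetVolumesToBackup(pod))

	// when the PV is backed up later, consult the tracker
	if pv.Spec.ClaimRef != nil && tracker.Has(pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name) {
		return false
	}
	return true
}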

pkg/backup/rbac.go (new file, 140 lines)

@@ -0,0 +1,140 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"github.com/pkg/errors"
rbac "k8s.io/api/rbac/v1"
rbacbeta "k8s.io/api/rbac/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
rbacclient "k8s.io/client-go/kubernetes/typed/rbac/v1"
rbacbetaclient "k8s.io/client-go/kubernetes/typed/rbac/v1beta1"
)
// ClusterRoleBindingLister allows for listing ClusterRoleBindings in a version-independent way.
type ClusterRoleBindingLister interface {
// List returns a slice of ClusterRoleBindings which can represent either v1 or v1beta1 ClusterRoleBindings.
List() ([]ClusterRoleBinding, error)
}
// noopClusterRoleBindingLister exists to handle clusters where RBAC is disabled.
type noopClusterRoleBindingLister struct {
}
func (noop noopClusterRoleBindingLister) List() ([]ClusterRoleBinding, error) {
return []ClusterRoleBinding{}, nil
}
type v1ClusterRoleBindingLister struct {
client rbacclient.ClusterRoleBindingInterface
}
func (v1 v1ClusterRoleBindingLister) List() ([]ClusterRoleBinding, error) {
crbList, err := v1.client.List(metav1.ListOptions{})
if err != nil {
return nil, errors.WithStack(err)
}
var crbs []ClusterRoleBinding
for _, crb := range crbList.Items {
crbs = append(crbs, v1ClusterRoleBinding{crb: crb})
}
return crbs, nil
}
type v1beta1ClusterRoleBindingLister struct {
client rbacbetaclient.ClusterRoleBindingInterface
}
func (v1beta1 v1beta1ClusterRoleBindingLister) List() ([]ClusterRoleBinding, error) {
crbList, err := v1beta1.client.List(metav1.ListOptions{})
if err != nil {
return nil, errors.WithStack(err)
}
var crbs []ClusterRoleBinding
for _, crb := range crbList.Items {
crbs = append(crbs, v1beta1ClusterRoleBinding{crb: crb})
}
return crbs, nil
}
// NewClusterRoleBindingListerMap creates a map of RBAC version strings to their associated
// ClusterRoleBindingLister structs.
// Necessary so that callers of the ClusterRoleBindingLister interface don't need the kubernetes.Interface.
func NewClusterRoleBindingListerMap(clientset kubernetes.Interface) map[string]ClusterRoleBindingLister {
return map[string]ClusterRoleBindingLister{
rbac.SchemeGroupVersion.Version: v1ClusterRoleBindingLister{client: clientset.RbacV1().ClusterRoleBindings()},
rbacbeta.SchemeGroupVersion.Version: v1beta1ClusterRoleBindingLister{client: clientset.RbacV1beta1().ClusterRoleBindings()},
"": noopClusterRoleBindingLister{},
}
}
// ClusterRoleBinding abstracts access to ClusterRoleBindings whether they're v1 or v1beta1.
type ClusterRoleBinding interface {
// Name returns the name of a ClusterRoleBinding.
Name() string
// ServiceAccountSubjects returns the names of subjects that are service accounts in the given namespace.
ServiceAccountSubjects(namespace string) []string
// RoleRefName returns the name of a ClusterRoleBinding's RoleRef.
RoleRefName() string
}
type v1ClusterRoleBinding struct {
crb rbac.ClusterRoleBinding
}
func (c v1ClusterRoleBinding) Name() string {
return c.crb.Name
}
func (c v1ClusterRoleBinding) RoleRefName() string {
return c.crb.RoleRef.Name
}
func (c v1ClusterRoleBinding) ServiceAccountSubjects(namespace string) []string {
var saSubjects []string
for _, s := range c.crb.Subjects {
if s.Kind == rbac.ServiceAccountKind && s.Namespace == namespace {
saSubjects = append(saSubjects, s.Name)
}
}
return saSubjects
}
type v1beta1ClusterRoleBinding struct {
crb rbacbeta.ClusterRoleBinding
}
func (c v1beta1ClusterRoleBinding) Name() string {
return c.crb.Name
}
func (c v1beta1ClusterRoleBinding) RoleRefName() string {
return c.crb.RoleRef.Name
}
func (c v1beta1ClusterRoleBinding) ServiceAccountSubjects(namespace string) []string {
var saSubjects []string
for _, s := range c.crb.Subjects {
if s.Kind == rbac.ServiceAccountKind && s.Namespace == namespace {
saSubjects = append(saSubjects, s.Name)
}
}
return saSubjects
}
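A sketch of how a caller might consume these version-independent listers; the lookup key ("v1", "v1beta1", or "" when RBAC is disabled) is assumed to come from API discovery:

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"

	"github.com/heptio/ark/pkg/backup"
)

// listServiceAccountBindings prints, for each ClusterRoleBinding, the service
// accounts in the given namespace that it binds, without caring whether the
// cluster serves rbac v1 or v1beta1.
func listServiceAccountBindings(clientset kubernetes.Interface, rbacVersion, namespace string) error {
	lister, found := backup.NewClusterRoleBindingListerMap(clientset)[rbacVersion]
	if !found {
		return fmt.Errorf("unsupported rbac version: %q", rbacVersion)
	}

	crbs, err := lister.List()
	if err != nil {
		return err
	}

	for _, crb := range crbs {
		for _, sa := range crb.ServiceAccountSubjects(namespace) {
			fmt.Printf("%s binds role %s to service account %s/%s\n",
				crb.Name(), crb.RoleRefName(), namespace, sa)
		}
	}
	return nil
}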


@@ -17,19 +17,24 @@ limitations under the License.
package backup
import (
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/util/collections"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kuberrs "k8s.io/apimachinery/pkg/util/errors"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/collections"
)
type resourceBackupperFactory interface {
@@ -38,16 +43,17 @@ type resourceBackupperFactory interface {
backup *api.Backup,
namespaces *collections.IncludesExcludes,
resources *collections.IncludesExcludes,
labelSelector string,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
backedUpItems map[itemKey]struct{},
cohabitatingResources map[string]*cohabitatingResource,
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
resticSnapshotTracker *pvcSnapshotTracker,
) resourceBackupper
}
@@ -58,23 +64,23 @@ func (f *defaultResourceBackupperFactory) newResourceBackupper(
backup *api.Backup,
namespaces *collections.IncludesExcludes,
resources *collections.IncludesExcludes,
labelSelector string,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
backedUpItems map[itemKey]struct{},
cohabitatingResources map[string]*cohabitatingResource,
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
resticSnapshotTracker *pvcSnapshotTracker,
) resourceBackupper {
return &defaultResourceBackupper{
log: log,
backup: backup,
namespaces: namespaces,
resources: resources,
labelSelector: labelSelector,
dynamicFactory: dynamicFactory,
discoveryHelper: discoveryHelper,
backedUpItems: backedUpItems,
@@ -84,6 +90,8 @@ func (f *defaultResourceBackupperFactory) newResourceBackupper(
tarWriter: tarWriter,
resourceHooks: resourceHooks,
snapshotService: snapshotService,
resticBackupper: resticBackupper,
resticSnapshotTracker: resticSnapshotTracker,
itemBackupperFactory: &defaultItemBackupperFactory{},
}
}
@@ -97,16 +105,17 @@ type defaultResourceBackupper struct {
backup *api.Backup
namespaces *collections.IncludesExcludes
resources *collections.IncludesExcludes
labelSelector string
dynamicFactory client.DynamicFactory
discoveryHelper discovery.Helper
backedUpItems map[itemKey]struct{}
cohabitatingResources map[string]*cohabitatingResource
actions []resolvedAction
podCommandExecutor podCommandExecutor
podCommandExecutor podexec.PodCommandExecutor
tarWriter tarWriter
resourceHooks []resourceHook
snapshotService cloudprovider.SnapshotService
resticBackupper restic.Backupper
resticSnapshotTracker *pvcSnapshotTracker
itemBackupperFactory itemBackupperFactory
}
@@ -132,7 +141,7 @@ func (rb *defaultResourceBackupper) backupResource(
// If the resource we are backing up is NOT namespaces, and it is cluster-scoped, check to see if
// we should include it based on the IncludeClusterResources setting.
if gr != namespacesGroupResource && clusterScoped {
if gr != kuberesource.Namespaces && clusterScoped {
if rb.backup.Spec.IncludeClusterResources == nil {
if !rb.namespaces.IncludeEverything() {
// when IncludeClusterResources == nil (auto), only directly
@@ -181,12 +190,14 @@ func (rb *defaultResourceBackupper) backupResource(
rb.dynamicFactory,
rb.discoveryHelper,
rb.snapshotService,
rb.resticBackupper,
rb.resticSnapshotTracker,
)
namespacesToList := getNamespacesToList(rb.namespaces)
// Check if we're backing up namespaces, and only certain ones
if gr == namespacesGroupResource && namespacesToList[0] != "" {
if gr == kuberesource.Namespaces && namespacesToList[0] != "" {
resourceClient, err := rb.dynamicFactory.ClientForGroupVersionResource(gv, resource, "")
if err != nil {
return err
@@ -234,8 +245,13 @@ func (rb *defaultResourceBackupper) backupResource(
return err
}
var labelSelector string
if selector := rb.backup.Spec.LabelSelector; selector != nil {
labelSelector = metav1.FormatLabelSelector(selector)
}
log.WithField("namespace", namespace).Info("Listing items")
unstructuredList, err := resourceClient.List(metav1.ListOptions{LabelSelector: rb.labelSelector})
unstructuredList, err := resourceClient.List(metav1.ListOptions{LabelSelector: labelSelector})
if err != nil {
return errors.WithStack(err)
}
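metav1.FormatLabelSelector converts the structured selector from the backup spec into the string form that ListOptions expects. For the selector used in the tests below:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	selector := &metav1.LabelSelector{
		MatchLabels: map[string]string{"foo": "bar"},
	}
	// prints "foo=bar" -- the value passed as ListOptions.LabelSelector
	fmt.Println(metav1.FormatLabelSelector(selector))
}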
@@ -260,7 +276,7 @@ func (rb *defaultResourceBackupper) backupResource(
continue
}
if gr == namespacesGroupResource && !rb.namespaces.ShouldInclude(metadata.GetName()) {
if gr == kuberesource.Namespaces && !rb.namespaces.ShouldInclude(metadata.GetName()) {
log.WithField("name", metadata.GetName()).Info("skipping namespace because it is excluded")
continue
}


@@ -23,6 +23,9 @@ import (
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/collections"
arktest "github.com/heptio/ark/pkg/util/test"
"github.com/stretchr/testify/assert"
@@ -73,8 +76,8 @@ func TestBackupResource(t *testing.T) {
groupResource: schema.GroupResource{Group: "", Resource: "pods"},
listResponses: [][]*unstructured.Unstructured{
{
unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname1"}}`),
unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname2"}}`),
},
},
},
@@ -89,12 +92,12 @@ func TestBackupResource(t *testing.T) {
groupResource: schema.GroupResource{Group: "", Resource: "pods"},
listResponses: [][]*unstructured.Unstructured{
{
unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname1"}}`),
unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname2"}}`),
},
{
unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname3"}}`),
unstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname4"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname3"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname4"}}`),
},
},
},
@@ -109,8 +112,8 @@ func TestBackupResource(t *testing.T) {
groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"},
listResponses: [][]*unstructured.Unstructured{
{
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
},
},
},
@@ -126,8 +129,8 @@ func TestBackupResource(t *testing.T) {
groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"},
listResponses: [][]*unstructured.Unstructured{
{
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
},
},
},
@@ -165,8 +168,8 @@ func TestBackupResource(t *testing.T) {
groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"},
listResponses: [][]*unstructured.Unstructured{
{
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
},
},
},
@@ -193,8 +196,8 @@ func TestBackupResource(t *testing.T) {
groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"},
listResponses: [][]*unstructured.Unstructured{
{
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
unstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`),
},
},
},
@@ -210,8 +213,8 @@ func TestBackupResource(t *testing.T) {
groupResource: schema.GroupResource{Group: "", Resource: "namespaces"},
expectSkip: false,
getResponses: []*unstructured.Unstructured{
unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`),
unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`),
arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`),
},
},
}
@@ -223,8 +226,6 @@ func TestBackupResource(t *testing.T) {
},
}
labelSelector := "foo=bar"
dynamicFactory := &arktest.FakeDynamicFactory{}
defer dynamicFactory.AssertExpectations(t)
@@ -250,7 +251,7 @@ func TestBackupResource(t *testing.T) {
{name: "myhook"},
}
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
tarWriter := &fakeTarWriter{}
@@ -261,7 +262,6 @@ func TestBackupResource(t *testing.T) {
backup,
test.namespaces,
test.resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
@@ -270,7 +270,9 @@ func TestBackupResource(t *testing.T) {
podCommandExecutor,
tarWriter,
resourceHooks,
nil,
nil, // snapshot service
nil, // restic backupper
newPVCSnapshotTracker(),
).(*defaultResourceBackupper)
itemBackupperFactory := &mockItemBackupperFactory{}
@@ -293,6 +295,8 @@ func TestBackupResource(t *testing.T) {
dynamicFactory,
discoveryHelper,
mock.Anything,
mock.Anything,
mock.Anything,
).Return(itemBackupper)
if len(test.listResponses) > 0 {
@@ -309,7 +313,7 @@ func TestBackupResource(t *testing.T) {
list.Items = append(list.Items, *item)
itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), item, test.groupResource).Return(nil)
}
client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(list, nil)
client.On("List", metav1.ListOptions{}).Return(list, nil)
}
}
@@ -378,13 +382,19 @@ func TestBackupResourceCohabitation(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
backup := &v1.Backup{}
backup := &v1.Backup{
Spec: v1.BackupSpec{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"foo": "bar",
},
},
},
}
namespaces := collections.NewIncludesExcludes().Includes("*")
resources := collections.NewIncludesExcludes().Includes("*")
labelSelector := "foo=bar"
dynamicFactory := &arktest.FakeDynamicFactory{}
defer dynamicFactory.AssertExpectations(t)
@@ -410,7 +420,7 @@ func TestBackupResourceCohabitation(t *testing.T) {
{name: "myhook"},
}
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
tarWriter := &fakeTarWriter{}
@@ -420,7 +430,6 @@ func TestBackupResourceCohabitation(t *testing.T) {
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
@@ -429,7 +438,9 @@ func TestBackupResourceCohabitation(t *testing.T) {
podCommandExecutor,
tarWriter,
resourceHooks,
nil,
nil, // snapshot service
nil, // restic backupper
newPVCSnapshotTracker(),
).(*defaultResourceBackupper)
itemBackupperFactory := &mockItemBackupperFactory{}
@@ -450,7 +461,9 @@ func TestBackupResourceCohabitation(t *testing.T) {
resourceHooks,
dynamicFactory,
discoveryHelper,
mock.Anything,
mock.Anything, // snapshot service
mock.Anything, // restic backupper
mock.Anything, // pvc snapshot tracker
).Return(itemBackupper)
client := &arktest.FakeDynamicClient{}
@@ -458,7 +471,7 @@ func TestBackupResourceCohabitation(t *testing.T) {
// STEP 1: make sure the initial backup goes through
dynamicFactory.On("ClientForGroupVersionResource", test.groupVersion1, test.apiResource, "").Return(client, nil)
client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(&unstructured.UnstructuredList{}, nil)
client.On("List", metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(backup.Spec.LabelSelector)}).Return(&unstructured.UnstructuredList{}, nil)
// STEP 2: do the backup
err := rb.backupResource(test.apiGroup1, test.apiResource)
@@ -477,7 +490,6 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
namespaces := collections.NewIncludesExcludes().Includes("ns-1")
resources := collections.NewIncludesExcludes().Includes("*")
labelSelector := "foo=bar"
backedUpItems := map[itemKey]struct{}{}
dynamicFactory := &arktest.FakeDynamicFactory{}
@@ -491,7 +503,7 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
resourceHooks := []resourceHook{}
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
tarWriter := &fakeTarWriter{}
@@ -501,7 +513,6 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
@@ -510,7 +521,9 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
podCommandExecutor,
tarWriter,
resourceHooks,
nil,
nil, // snapshot service
nil, // restic backupper
newPVCSnapshotTracker(),
).(*defaultResourceBackupper)
itemBackupperFactory := &mockItemBackupperFactory{}
@@ -546,6 +559,8 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
dynamicFactory,
discoveryHelper,
mock.Anything,
mock.Anything,
mock.Anything,
).Return(itemBackupper)
client := &arktest.FakeDynamicClient{}
@@ -553,7 +568,7 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
coreV1Group := schema.GroupVersion{Group: "", Version: "v1"}
dynamicFactory.On("ClientForGroupVersionResource", coreV1Group, namespacesResource, "").Return(client, nil)
ns1 := unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`)
ns1 := arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`)
client.On("Get", "ns-1", metav1.GetOptions{}).Return(ns1, nil)
itemHookHandler.On("handleHooks", mock.Anything, schema.GroupResource{Group: "", Resource: "namespaces"}, ns1, resourceHooks, hookPhasePre).Return(nil)
@@ -567,12 +582,19 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) {
}
func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) {
backup := &v1.Backup{}
backup := &v1.Backup{
Spec: v1.BackupSpec{
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"foo": "bar",
},
},
},
}
namespaces := collections.NewIncludesExcludes().Excludes("ns-1")
resources := collections.NewIncludesExcludes().Includes("*")
labelSelector := "foo=bar"
backedUpItems := map[itemKey]struct{}{}
dynamicFactory := &arktest.FakeDynamicFactory{}
@@ -586,7 +608,7 @@ func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) {
resourceHooks := []resourceHook{}
podCommandExecutor := &mockPodCommandExecutor{}
podCommandExecutor := &arktest.MockPodCommandExecutor{}
defer podCommandExecutor.AssertExpectations(t)
tarWriter := &fakeTarWriter{}
@@ -596,7 +618,6 @@ func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) {
backup,
namespaces,
resources,
labelSelector,
dynamicFactory,
discoveryHelper,
backedUpItems,
@@ -605,7 +626,9 @@ func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) {
podCommandExecutor,
tarWriter,
resourceHooks,
nil,
nil, // snapshot service
nil, // restic backupper
newPVCSnapshotTracker(),
).(*defaultResourceBackupper)
itemBackupperFactory := &mockItemBackupperFactory{}
@@ -630,6 +653,8 @@ func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) {
dynamicFactory,
discoveryHelper,
mock.Anything,
mock.Anything,
mock.Anything,
).Return(itemBackupper)
client := &arktest.FakeDynamicClient{}
@@ -638,14 +663,14 @@ func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) {
coreV1Group := schema.GroupVersion{Group: "", Version: "v1"}
dynamicFactory.On("ClientForGroupVersionResource", coreV1Group, namespacesResource, "").Return(client, nil)
ns1 := unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`)
ns2 := unstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`)
ns1 := arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`)
ns2 := arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`)
list := &unstructured.UnstructuredList{
Items: []unstructured.Unstructured{*ns1, *ns2},
}
client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(list, nil)
client.On("List", metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(backup.Spec.LabelSelector)}).Return(list, nil)
itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), ns2, namespacesGroupResource).Return(nil)
itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), ns2, kuberesource.Namespaces).Return(nil)
err := rb.backupResource(v1Group, namespacesResource)
require.NoError(t, err)
@@ -660,12 +685,14 @@ func (ibf *mockItemBackupperFactory) newItemBackupper(
namespaces, resources *collections.IncludesExcludes,
backedUpItems map[itemKey]struct{},
actions []resolvedAction,
podCommandExecutor podCommandExecutor,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resourceHooks []resourceHook,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
snapshotService cloudprovider.SnapshotService,
resticBackupper restic.Backupper,
resticSnapshotTracker *pvcSnapshotTracker,
) ItemBackupper {
args := ibf.Called(
backup,
@@ -679,6 +706,8 @@ func (ibf *mockItemBackupperFactory) newItemBackupper(
dynamicFactory,
discoveryHelper,
snapshotService,
resticBackupper,
resticSnapshotTracker,
)
return args.Get(0).(ItemBackupper)
}

View File

@@ -0,0 +1,121 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
rbac "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"github.com/heptio/ark/pkg/apis/ark/v1"
arkdiscovery "github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/kuberesource"
)
// serviceAccountAction implements ItemAction.
type serviceAccountAction struct {
log logrus.FieldLogger
clusterRoleBindings []ClusterRoleBinding
}
// NewServiceAccountAction creates a new ItemAction for service accounts.
func NewServiceAccountAction(log logrus.FieldLogger, clusterRoleBindingListers map[string]ClusterRoleBindingLister, discoveryHelper arkdiscovery.Helper) (ItemAction, error) {
// Look up the supported RBAC version
var supportedAPI metav1.GroupVersionForDiscovery
for _, ag := range discoveryHelper.APIGroups() {
if ag.Name == rbac.GroupName {
supportedAPI = ag.PreferredVersion
break
}
}
crbLister := clusterRoleBindingListers[supportedAPI.Version]
// This should be safe because the List call will return a 0-item slice
// if there's no matching API version.
crbs, err := crbLister.List()
if err != nil {
return nil, err
}
return &serviceAccountAction{
log: log,
clusterRoleBindings: crbs,
}, nil
}
// AppliesTo returns a ResourceSelector that applies only to service accounts.
func (a *serviceAccountAction) AppliesTo() (ResourceSelector, error) {
return ResourceSelector{
IncludedResources: []string{"serviceaccounts"},
}, nil
}
// Execute checks for any ClusterRoleBindings that have this service account as a subject, and
// adds the ClusterRoleBinding and associated ClusterRole to the list of additional items to
// be backed up.
func (a *serviceAccountAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runtime.Unstructured, []ResourceIdentifier, error) {
a.log.Info("Running serviceAccountAction")
defer a.log.Info("Done running serviceAccountAction")
objectMeta, err := meta.Accessor(item)
if err != nil {
return nil, nil, errors.WithStack(err)
}
var (
namespace = objectMeta.GetNamespace()
name = objectMeta.GetName()
bindings = sets.NewString()
roles = sets.NewString()
)
for _, crb := range a.clusterRoleBindings {
for _, s := range crb.ServiceAccountSubjects(namespace) {
if s == name {
a.log.Infof("Adding clusterrole %s and clusterrolebinding %s to additionalItems since serviceaccount %s/%s is a subject",
crb.RoleRefName(), crb.Name(), namespace, name)
bindings.Insert(crb.Name())
roles.Insert(crb.RoleRefName())
break
}
}
}
var additionalItems []ResourceIdentifier
for binding := range bindings {
additionalItems = append(additionalItems, ResourceIdentifier{
GroupResource: kuberesource.ClusterRoleBindings,
Name: binding,
})
}
for role := range roles {
additionalItems = append(additionalItems, ResourceIdentifier{
GroupResource: kuberesource.ClusterRoles,
Name: role,
})
}
return item, additionalItems, nil
}
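The version-agnostic ClusterRoleBinding abstraction this action calls into is defined elsewhere in the package and not shown in this diff. Based on the calls above and the fakes in the test file below, it presumably looks roughly like this sketch (method names are taken from this file; the exact signatures are assumptions):
// Sketch only, not part of this diff.
type ClusterRoleBinding interface {
	// Name returns the binding's own name.
	Name() string
	// RoleRefName returns the name of the ClusterRole the binding references.
	RoleRefName() string
	// ServiceAccountSubjects returns the names of all service account
	// subjects of the binding within the given namespace.
	ServiceAccountSubjects(namespace string) []string
}
// ClusterRoleBindingLister lists bindings for a single RBAC API version.
type ClusterRoleBindingLister interface {
	List() ([]ClusterRoleBinding, error)
}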

View File

@@ -0,0 +1,614 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"fmt"
"sort"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
rbac "k8s.io/api/rbac/v1"
rbacbeta "k8s.io/api/rbac/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"github.com/heptio/ark/pkg/kuberesource"
arktest "github.com/heptio/ark/pkg/util/test"
)
func newV1ClusterRoleBindingList(rbacCRBList []rbac.ClusterRoleBinding) []ClusterRoleBinding {
var crbs []ClusterRoleBinding
for _, c := range rbacCRBList {
crbs = append(crbs, v1ClusterRoleBinding{crb: c})
}
return crbs
}
func newV1beta1ClusterRoleBindingList(rbacCRBList []rbacbeta.ClusterRoleBinding) []ClusterRoleBinding {
var crbs []ClusterRoleBinding
for _, c := range rbacCRBList {
crbs = append(crbs, v1beta1ClusterRoleBinding{crb: c})
}
return crbs
}
type FakeV1ClusterRoleBindingLister struct {
v1crbs []rbac.ClusterRoleBinding
}
func (f FakeV1ClusterRoleBindingLister) List() ([]ClusterRoleBinding, error) {
var crbs []ClusterRoleBinding
for _, c := range f.v1crbs {
crbs = append(crbs, v1ClusterRoleBinding{crb: c})
}
return crbs, nil
}
type FakeV1beta1ClusterRoleBindingLister struct {
v1beta1crbs []rbacbeta.ClusterRoleBinding
}
func (f FakeV1beta1ClusterRoleBindingLister) List() ([]ClusterRoleBinding, error) {
var crbs []ClusterRoleBinding
for _, c := range f.v1beta1crbs {
crbs = append(crbs, v1beta1ClusterRoleBinding{crb: c})
}
return crbs, nil
}
func TestServiceAccountActionAppliesTo(t *testing.T) {
// Instantiating the struct directly since using
// NewServiceAccountAction requires a full kubernetes clientset
a := &serviceAccountAction{}
actual, err := a.AppliesTo()
require.NoError(t, err)
expected := ResourceSelector{
IncludedResources: []string{"serviceaccounts"},
}
assert.Equal(t, expected, actual)
}
func TestNewServiceAccountAction(t *testing.T) {
tests := []struct {
name string
version string
expectedCRBs []ClusterRoleBinding
}{
{
name: "rbac v1 API instantiates an saAction",
version: rbac.SchemeGroupVersion.Version,
expectedCRBs: []ClusterRoleBinding{
v1ClusterRoleBinding{
crb: rbac.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "v1crb-1",
},
},
},
v1ClusterRoleBinding{
crb: rbac.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "v1crb-2",
},
},
},
},
},
{
name: "rbac v1beta1 API instantiates an saAction",
version: rbacbeta.SchemeGroupVersion.Version,
expectedCRBs: []ClusterRoleBinding{
v1beta1ClusterRoleBinding{
crb: rbacbeta.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "v1beta1crb-1",
},
},
},
v1beta1ClusterRoleBinding{
crb: rbacbeta.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "v1beta1crb-2",
},
},
},
},
},
{
name: "no RBAC API instantiates an saAction with empty slice",
version: "",
expectedCRBs: []ClusterRoleBinding{},
},
}
// Set up all of our fakes outside the test loop
discoveryHelper := arktest.FakeDiscoveryHelper{}
logger := arktest.NewLogger()
v1crbs := []rbac.ClusterRoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Name: "v1crb-1",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "v1crb-2",
},
},
}
v1beta1crbs := []rbacbeta.ClusterRoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Name: "v1beta1crb-1",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "v1beta1crb-2",
},
},
}
clusterRoleBindingListers := map[string]ClusterRoleBindingLister{
rbac.SchemeGroupVersion.Version: FakeV1ClusterRoleBindingLister{v1crbs: v1crbs},
rbacbeta.SchemeGroupVersion.Version: FakeV1beta1ClusterRoleBindingLister{v1beta1crbs: v1beta1crbs},
"": noopClusterRoleBindingLister{},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
// We only care about the preferred version, nothing else in the list
discoveryHelper.APIGroupsList = []metav1.APIGroup{
{
Name: rbac.GroupName,
PreferredVersion: metav1.GroupVersionForDiscovery{
Version: test.version,
},
},
}
action, err := NewServiceAccountAction(logger, clusterRoleBindingListers, &discoveryHelper)
require.NoError(t, err)
saAction, ok := action.(*serviceAccountAction)
require.True(t, ok)
assert.Equal(t, test.expectedCRBs, saAction.clusterRoleBindings)
})
}
}
func TestServiceAccountActionExecute(t *testing.T) {
tests := []struct {
name string
serviceAccount runtime.Unstructured
crbs []rbac.ClusterRoleBinding
expectedAdditionalItems []ResourceIdentifier
}{
{
name: "no crbs",
serviceAccount: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "ServiceAccount",
"metadata": {
"namespace": "heptio-ark",
"name": "ark"
}
}
`),
crbs: nil,
expectedAdditionalItems: nil,
},
{
name: "no matching crbs",
serviceAccount: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "ServiceAccount",
"metadata": {
"namespace": "heptio-ark",
"name": "ark"
}
}
`),
crbs: []rbac.ClusterRoleBinding{
{
Subjects: []rbac.Subject{
{
Kind: "non-matching-kind",
Namespace: "non-matching-ns",
Name: "non-matching-name",
},
{
Kind: "non-matching-kind",
Namespace: "heptio-ark",
Name: "ark",
},
{
Kind: rbac.ServiceAccountKind,
Namespace: "non-matching-ns",
Name: "ark",
},
{
Kind: rbac.ServiceAccountKind,
Namespace: "heptio-ark",
Name: "non-matching-name",
},
},
RoleRef: rbac.RoleRef{
Name: "role",
},
},
},
expectedAdditionalItems: nil,
},
{
name: "some matching crbs",
serviceAccount: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "ServiceAccount",
"metadata": {
"namespace": "heptio-ark",
"name": "ark"
}
}
`),
crbs: []rbac.ClusterRoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Name: "crb-1",
},
Subjects: []rbac.Subject{
{
Kind: "non-matching-kind",
Namespace: "non-matching-ns",
Name: "non-matching-name",
},
},
RoleRef: rbac.RoleRef{
Name: "role-1",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "crb-2",
},
Subjects: []rbac.Subject{
{
Kind: "non-matching-kind",
Namespace: "non-matching-ns",
Name: "non-matching-name",
},
{
Kind: rbac.ServiceAccountKind,
Namespace: "heptio-ark",
Name: "ark",
},
},
RoleRef: rbac.RoleRef{
Name: "role-2",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "crb-3",
},
Subjects: []rbac.Subject{
{
Kind: rbac.ServiceAccountKind,
Namespace: "heptio-ark",
Name: "ark",
},
},
RoleRef: rbac.RoleRef{
Name: "role-3",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "crb-4",
},
Subjects: []rbac.Subject{
{
Kind: rbac.ServiceAccountKind,
Namespace: "heptio-ark",
Name: "ark",
},
{
Kind: "non-matching-kind",
Namespace: "non-matching-ns",
Name: "non-matching-name",
},
},
RoleRef: rbac.RoleRef{
Name: "role-4",
},
},
},
expectedAdditionalItems: []ResourceIdentifier{
{
GroupResource: kuberesource.ClusterRoleBindings,
Name: "crb-2",
},
{
GroupResource: kuberesource.ClusterRoleBindings,
Name: "crb-3",
},
{
GroupResource: kuberesource.ClusterRoleBindings,
Name: "crb-4",
},
{
GroupResource: kuberesource.ClusterRoles,
Name: "role-2",
},
{
GroupResource: kuberesource.ClusterRoles,
Name: "role-3",
},
{
GroupResource: kuberesource.ClusterRoles,
Name: "role-4",
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
// Create the action struct directly so we don't need to mock a clientset
action := &serviceAccountAction{
log: arktest.NewLogger(),
clusterRoleBindings: newV1ClusterRoleBindingList(test.crbs),
}
res, additional, err := action.Execute(test.serviceAccount, nil)
assert.Equal(t, test.serviceAccount, res)
assert.Nil(t, err)
// ensure slices are ordered for valid comparison
sort.Slice(test.expectedAdditionalItems, func(i, j int) bool {
return fmt.Sprintf("%s.%s", test.expectedAdditionalItems[i].GroupResource.String(), test.expectedAdditionalItems[i].Name) <
fmt.Sprintf("%s.%s", test.expectedAdditionalItems[j].GroupResource.String(), test.expectedAdditionalItems[j].Name)
})
sort.Slice(additional, func(i, j int) bool {
return fmt.Sprintf("%s.%s", additional[i].GroupResource.String(), additional[i].Name) <
fmt.Sprintf("%s.%s", additional[j].GroupResource.String(), additional[j].Name)
})
assert.Equal(t, test.expectedAdditionalItems, additional)
})
}
}
func TestServiceAccountActionExecuteOnBeta1(t *testing.T) {
tests := []struct {
name string
serviceAccount runtime.Unstructured
crbs []rbacbeta.ClusterRoleBinding
expectedAdditionalItems []ResourceIdentifier
}{
{
name: "no crbs",
serviceAccount: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "ServiceAccount",
"metadata": {
"namespace": "heptio-ark",
"name": "ark"
}
}
`),
crbs: nil,
expectedAdditionalItems: nil,
},
{
name: "no matching crbs",
serviceAccount: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "ServiceAccount",
"metadata": {
"namespace": "heptio-ark",
"name": "ark"
}
}
`),
crbs: []rbacbeta.ClusterRoleBinding{
{
Subjects: []rbacbeta.Subject{
{
Kind: "non-matching-kind",
Namespace: "non-matching-ns",
Name: "non-matching-name",
},
{
Kind: "non-matching-kind",
Namespace: "heptio-ark",
Name: "ark",
},
{
Kind: rbacbeta.ServiceAccountKind,
Namespace: "non-matching-ns",
Name: "ark",
},
{
Kind: rbacbeta.ServiceAccountKind,
Namespace: "heptio-ark",
Name: "non-matching-name",
},
},
RoleRef: rbacbeta.RoleRef{
Name: "role",
},
},
},
expectedAdditionalItems: nil,
},
{
name: "some matching crbs",
serviceAccount: arktest.UnstructuredOrDie(`
{
"apiVersion": "v1",
"kind": "ServiceAccount",
"metadata": {
"namespace": "heptio-ark",
"name": "ark"
}
}
`),
crbs: []rbacbeta.ClusterRoleBinding{
{
ObjectMeta: metav1.ObjectMeta{
Name: "crb-1",
},
Subjects: []rbacbeta.Subject{
{
Kind: "non-matching-kind",
Namespace: "non-matching-ns",
Name: "non-matching-name",
},
},
RoleRef: rbacbeta.RoleRef{
Name: "role-1",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "crb-2",
},
Subjects: []rbacbeta.Subject{
{
Kind: "non-matching-kind",
Namespace: "non-matching-ns",
Name: "non-matching-name",
},
{
Kind: rbacbeta.ServiceAccountKind,
Namespace: "heptio-ark",
Name: "ark",
},
},
RoleRef: rbacbeta.RoleRef{
Name: "role-2",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "crb-3",
},
Subjects: []rbacbeta.Subject{
{
Kind: rbacbeta.ServiceAccountKind,
Namespace: "heptio-ark",
Name: "ark",
},
},
RoleRef: rbacbeta.RoleRef{
Name: "role-3",
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "crb-4",
},
Subjects: []rbacbeta.Subject{
{
Kind: rbacbeta.ServiceAccountKind,
Namespace: "heptio-ark",
Name: "ark",
},
{
Kind: "non-matching-kind",
Namespace: "non-matching-ns",
Name: "non-matching-name",
},
},
RoleRef: rbacbeta.RoleRef{
Name: "role-4",
},
},
},
expectedAdditionalItems: []ResourceIdentifier{
{
GroupResource: kuberesource.ClusterRoleBindings,
Name: "crb-2",
},
{
GroupResource: kuberesource.ClusterRoleBindings,
Name: "crb-3",
},
{
GroupResource: kuberesource.ClusterRoleBindings,
Name: "crb-4",
},
{
GroupResource: kuberesource.ClusterRoles,
Name: "role-2",
},
{
GroupResource: kuberesource.ClusterRoles,
Name: "role-3",
},
{
GroupResource: kuberesource.ClusterRoles,
Name: "role-4",
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
// Create the action struct directly so we don't need to mock a clientset
action := &serviceAccountAction{
log: arktest.NewLogger(),
clusterRoleBindings: newV1beta1ClusterRoleBindingList(test.crbs),
}
res, additional, err := action.Execute(test.serviceAccount, nil)
assert.Equal(t, test.serviceAccount, res)
assert.Nil(t, err)
// ensure slices are ordered for valid comparison
sort.Slice(test.expectedAdditionalItems, func(i, j int) bool {
return fmt.Sprintf("%s.%s", test.expectedAdditionalItems[i].GroupResource.String(), test.expectedAdditionalItems[i].Name) <
fmt.Sprintf("%s.%s", test.expectedAdditionalItems[j].GroupResource.String(), test.expectedAdditionalItems[j].Name)
})
sort.Slice(additional, func(i, j int) bool {
return fmt.Sprintf("%s.%s", additional[i].GroupResource.String(), additional[i].Name) <
fmt.Sprintf("%s.%s", additional[j].GroupResource.String(), additional[j].Name)
})
assert.Equal(t, test.expectedAdditionalItems, additional)
})
}
}

View File

@@ -23,6 +23,7 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
)
@@ -82,12 +83,20 @@ type Getter interface {
Get(name string, opts metav1.GetOptions) (*unstructured.Unstructured, error)
}
// Patcher patches an object.
type Patcher interface {
// Patch patches the named object using the provided patch bytes, which are
// expected to be in JSON merge patch format. The patched object is returned.
Patch(name string, data []byte) (*unstructured.Unstructured, error)
}
// Dynamic contains client methods that Ark needs for backing up and restoring resources.
type Dynamic interface {
Creator
Lister
Watcher
Getter
Patcher
}
// dynamicResourceClient implements Dynamic.
@@ -112,3 +121,7 @@ func (d *dynamicResourceClient) Watch(options metav1.ListOptions) (watch.Interfa
func (d *dynamicResourceClient) Get(name string, opts metav1.GetOptions) (*unstructured.Unstructured, error) {
return d.resourceClient.Get(name, opts)
}
func (d *dynamicResourceClient) Patch(name string, data []byte) (*unstructured.Unstructured, error) {
return d.resourceClient.Patch(name, types.MergePatchType, data)
}
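A hypothetical use of the new Patcher, to show the expected payload shape; the label key and value are illustrative only:
// Sketch only: a JSON merge patch supplies just the fields to change.
func addExampleLabel(client Dynamic, name string) (*unstructured.Unstructured, error) {
	patch := []byte(`{"metadata":{"labels":{"example-key":"example-value"}}}`)
	return client.Patch(name, patch)
}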

View File

@@ -20,6 +20,7 @@ import (
"regexp"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/pkg/errors"
@@ -219,7 +220,17 @@ func (b *blockStore) DeleteSnapshot(snapshotID string) error {
_, err := b.ec2.DeleteSnapshot(req)
return errors.WithStack(err)
// if it's a NotFound error, we don't need to return an error
// since the snapshot is not there.
// see https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "InvalidSnapshot.NotFound" {
return nil
}
if err != nil {
return errors.WithStack(err)
}
return nil
}
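The same treat-NotFound-as-success pattern recurs in the Azure and GCP block stores later in this diff. Factored out, the AWS check above reads like this (a sketch, not part of the change):
// isSnapshotNotFound reports whether err is the EC2 "snapshot does not exist"
// error that DeleteSnapshot deliberately swallows.
func isSnapshotNotFound(err error) bool {
	awsErr, ok := err.(awserr.Error)
	return ok && awsErr.Code() == "InvalidSnapshot.NotFound"
}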
var ebsVolumeIDRegex = regexp.MustCompile("vol-.*")

View File

@@ -0,0 +1,53 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"context"
"github.com/pkg/errors"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// GetBucketRegion returns the AWS region that a bucket is in, or an error
// if the region cannot be determined.
func GetBucketRegion(bucket string) (string, error) {
var region string
session, err := session.NewSession()
if err != nil {
return "", errors.WithStack(err)
}
for _, partition := range endpoints.DefaultPartitions() {
for regionHint := range partition.Regions() {
region, _ = s3manager.GetBucketRegion(context.Background(), session, bucket, regionHint)
// we only need to try a single region hint per partition, so break after the first
break
}
if region != "" {
return region, nil
}
}
return "", errors.New("unable to determine bucket's region")
}
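A hypothetical call site, mirroring how the object store's Init below consumes the result (the bucket name and helper are illustrative):
// Sketch only.
func newConfigForBucket(bucket string) (*aws.Config, error) {
	region, err := GetBucketRegion(bucket)
	if err != nil {
		return nil, err
	}
	return aws.NewConfig().WithRegion(region), nil
}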

View File

@@ -34,6 +34,7 @@ const (
s3URLKey = "s3Url"
kmsKeyIDKey = "kmsKeyId"
s3ForcePathStyleKey = "s3ForcePathStyle"
bucketKey = "bucket"
)
type objectStore struct {
@@ -52,13 +53,15 @@ func (o *objectStore) Init(config map[string]string) error {
s3URL = config[s3URLKey]
kmsKeyID = config[kmsKeyIDKey]
s3ForcePathStyleVal = config[s3ForcePathStyleKey]
s3ForcePathStyle bool
err error
)
if region == "" {
return errors.Errorf("missing %s in aws configuration", regionKey)
}
// note that the bucket name is automatically added to the config map
// by the server from the ObjectStorageProviderConfig, so it doesn't
// need to be explicitly set by the user in the config.
bucket = config[bucketKey]
s3ForcePathStyle bool
err error
)
if s3ForcePathStyleVal != "" {
if s3ForcePathStyle, err = strconv.ParseBool(s3ForcePathStyleVal); err != nil {
@@ -66,6 +69,17 @@ func (o *objectStore) Init(config map[string]string) error {
}
}
// if we're talking to AWS proper (not an alternate S3-compatible API) and
// the region was not explicitly specified, determine the bucket's region
if s3URL == "" && region == "" {
var err error
region, err = GetBucketRegion(bucket)
if err != nil {
return err
}
}
awsConfig := aws.NewConfig().
WithRegion(region).
WithS3ForcePathStyle(s3ForcePathStyle)

View File

@@ -19,6 +19,7 @@ package azure
import (
"context"
"fmt"
"net/http"
"os"
"regexp"
"strings"
@@ -280,7 +281,16 @@ func (b *blockStore) DeleteSnapshot(snapshotID string) error {
err = <-errChan
return errors.WithStack(err)
// if it's a 404 (not found) error, we don't need to return an error
// since the snapshot is not there.
if azureErr, ok := err.(autorest.DetailedError); ok && azureErr.StatusCode == http.StatusNotFound {
return nil
}
if err != nil {
return errors.WithStack(err)
}
return nil
}
func getComputeResourceName(subscription, resourceGroup, resource, name string) string {

View File

@@ -19,14 +19,16 @@ package gcp
import (
"encoding/json"
"io/ioutil"
"net/http"
"os"
"github.com/pkg/errors"
uuid "github.com/satori/go.uuid"
"github.com/satori/uuid"
"github.com/sirupsen/logrus"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
"k8s.io/apimachinery/pkg/runtime"
@@ -212,7 +214,16 @@ func getSnapshotTags(arkTags map[string]string, diskDescription string, log logr
func (b *blockStore) DeleteSnapshot(snapshotID string) error {
_, err := b.gce.Snapshots.Delete(b.project, snapshotID).Do()
return errors.WithStack(err)
// if it's a 404 (not found) error, we don't need to return an error
// since the snapshot is not there.
if gcpErr, ok := err.(*googleapi.Error); ok && gcpErr.Code == http.StatusNotFound {
return nil
}
if err != nil {
return errors.WithStack(err)
}
return nil
}
func (b *blockStore) GetVolumeID(pv runtime.Unstructured) (string, error) {

View File

@@ -35,10 +35,25 @@ import (
const credentialsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS"
// bucketWriter wraps the GCP SDK functions for accessing the object store so
// they can be faked for testing.
type bucketWriter interface {
// getWriteCloser returns an io.WriteCloser that can be used to upload data to the specified bucket for the specified key.
getWriteCloser(bucket, key string) io.WriteCloser
}
type writer struct {
client *storage.Client
}
func (w *writer) getWriteCloser(bucket, key string) io.WriteCloser {
return w.client.Bucket(bucket).Object(key).NewWriter(context.Background())
}
type objectStore struct {
client *storage.Client
googleAccessID string
privateKey []byte
bucketWriter bucketWriter
}
func NewObjectStore() cloudprovider.ObjectStore {
@@ -76,16 +91,25 @@ func (o *objectStore) Init(config map[string]string) error {
}
o.client = client
o.bucketWriter = &writer{client: o.client}
return nil
}
func (o *objectStore) PutObject(bucket string, key string, body io.Reader) error {
w := o.client.Bucket(bucket).Object(key).NewWriter(context.Background())
defer w.Close()
w := o.bucketWriter.getWriteCloser(bucket, key)
_, err := io.Copy(w, body)
// The writer returned by NewWriter is asynchronous, so errors aren't
// guaranteed to be reported until Close() is called
_, copyErr := io.Copy(w, body)
return errors.WithStack(err)
// Always close w; if both the copy and the close fail, prefer the copy error.
closeErr := w.Close()
if copyErr != nil {
return copyErr
}
return closeErr
}
func (o *objectStore) GetObject(bucket string, key string) (io.ReadCloser, error) {

View File

@@ -0,0 +1,98 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gcp
import (
"errors"
"io"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
type mockWriteCloser struct {
closeErr error
writeErr error
}
func (m *mockWriteCloser) Close() error {
return m.closeErr
}
func (m *mockWriteCloser) Write(b []byte) (int, error) {
return len(b), m.writeErr
}
func newMockWriteCloser(writeErr, closeErr error) *mockWriteCloser {
return &mockWriteCloser{writeErr: writeErr, closeErr: closeErr}
}
type fakeWriter struct {
wc *mockWriteCloser
}
func newFakeWriter(wc *mockWriteCloser) *fakeWriter {
return &fakeWriter{wc: wc}
}
func (fw *fakeWriter) getWriteCloser(bucket, name string) io.WriteCloser {
return fw.wc
}
func TestPutObject(t *testing.T) {
tests := []struct {
name string
writeErr error
closeErr error
expectedErr error
}{
{
name: "No errors returns nil",
closeErr: nil,
writeErr: nil,
expectedErr: nil,
},
{
name: "Close() errors are returned",
closeErr: errors.New("error closing"),
expectedErr: errors.New("error closing"),
},
{
name: "Write() errors are returned",
writeErr: errors.New("error writing"),
expectedErr: errors.New("error writing"),
},
{
name: "Write errors supersede close errors",
writeErr: errors.New("error writing"),
closeErr: errors.New("error closing"),
expectedErr: errors.New("error writing"),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
wc := newMockWriteCloser(test.writeErr, test.closeErr)
o := NewObjectStore().(*objectStore)
o.bucketWriter = newFakeWriter(wc)
err := o.PutObject("bucket", "key", strings.NewReader("contents"))
assert.Equal(t, test.expectedErr, err)
})
}
}

View File

@@ -30,6 +30,7 @@ import (
"github.com/heptio/ark/pkg/cmd/cli/describe"
"github.com/heptio/ark/pkg/cmd/cli/get"
"github.com/heptio/ark/pkg/cmd/cli/plugin"
"github.com/heptio/ark/pkg/cmd/cli/restic"
"github.com/heptio/ark/pkg/cmd/cli/restore"
"github.com/heptio/ark/pkg/cmd/cli/schedule"
"github.com/heptio/ark/pkg/cmd/server"
@@ -62,11 +63,12 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.`
get.NewCommand(f),
describe.NewCommand(f),
create.NewCommand(f),
runplugin.NewCommand(),
runplugin.NewCommand(f),
plugin.NewCommand(f),
delete.NewCommand(f),
cliclient.NewCommand(),
completion.NewCommand(),
restic.NewCommand(f),
)
// add the glog flags

View File

@@ -20,18 +20,22 @@ import (
"fmt"
"os"
pkgbackup "github.com/heptio/ark/pkg/backup"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/heptio/ark/pkg/apis/ark/v1"
pkgbackup "github.com/heptio/ark/pkg/backup"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/cmd/util/output"
"github.com/heptio/ark/pkg/restic"
)
func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
var listOptions metav1.ListOptions
var (
listOptions metav1.ListOptions
volumeDetails bool
)
c := &cobra.Command{
Use: use + " [NAME1] [NAME2] [NAME...]",
@@ -61,7 +65,13 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
fmt.Fprintf(os.Stderr, "error getting DeleteBackupRequests for backup %s: %v\n", backup.Name, err)
}
s := output.DescribeBackup(&backup, deleteRequestList.Items)
opts := restic.NewPodVolumeBackupListOptions(backup.Name, string(backup.UID))
podVolumeBackupList, err := arkClient.ArkV1().PodVolumeBackups(f.Namespace()).List(opts)
if err != nil {
fmt.Fprintf(os.Stderr, "error getting PodVolumeBackups for backup %s: %v\n", backup.Name, err)
}
s := output.DescribeBackup(&backup, deleteRequestList.Items, podVolumeBackupList.Items, volumeDetails)
if first {
first = false
fmt.Print(s)
@@ -74,6 +84,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
}
c.Flags().StringVarP(&listOptions.LabelSelector, "selector", "l", listOptions.LabelSelector, "only show items matching this label selector")
c.Flags().BoolVar(&volumeDetails, "volume-details", volumeDetails, "display details of restic volume backups")
return c
}
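restic.NewPodVolumeBackupListOptions isn't shown in this diff. A plausible sketch, assuming PodVolumeBackups carry labels for their owning backup's name and UID (the label keys here are assumptions):
// Sketch only; the real label keys may differ.
func NewPodVolumeBackupListOptions(name, uid string) metav1.ListOptions {
	return metav1.ListOptions{
		LabelSelector: fmt.Sprintf("ark.heptio.com/backup-name=%s,ark.heptio.com/backup-uid=%s", name, uid),
	}
}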

View File

@@ -21,14 +21,13 @@ import (
"fmt"
"strings"
jsonpatch "github.com/evanphx/json-patch"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
@@ -124,10 +123,10 @@ func NewAddCommand(f client.Factory) *cobra.Command {
updated, err := json.Marshal(arkDeploy)
cmd.CheckError(err)
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(original, updated, v1beta1.Deployment{})
patchBytes, err := jsonpatch.CreateMergePatch(original, updated)
cmd.CheckError(err)
_, err = kubeClient.AppsV1beta1().Deployments(arkDeploy.Namespace).Patch(arkDeploy.Name, types.StrategicMergePatchType, patchBytes)
_, err = kubeClient.AppsV1beta1().Deployments(arkDeploy.Namespace).Patch(arkDeploy.Name, types.MergePatchType, patchBytes)
cmd.CheckError(err)
},
}
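For context on why the patch type changed in tandem: jsonpatch.CreateMergePatch diffs two JSON documents into an RFC 7386 merge patch, which must be applied with types.MergePatchType. A sketch with hypothetical inputs:
func exampleMergePatch() ([]byte, error) {
	original := []byte(`{"spec":{"paused":true,"replicas":1}}`)
	updated := []byte(`{"spec":{"replicas":2}}`)
	// result: {"spec":{"paused":null,"replicas":2}}; a null removes a field,
	// which a strategic merge patch would express differently.
	return jsonpatch.CreateMergePatch(original, updated)
}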

View File

@@ -19,13 +19,12 @@ package plugin
import (
"encoding/json"
jsonpatch "github.com/evanphx/json-patch"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"k8s.io/api/apps/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
@@ -71,10 +70,10 @@ func NewRemoveCommand(f client.Factory) *cobra.Command {
updated, err := json.Marshal(arkDeploy)
cmd.CheckError(err)
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(original, updated, v1beta1.Deployment{})
patchBytes, err := jsonpatch.CreateMergePatch(original, updated)
cmd.CheckError(err)
_, err = kubeClient.AppsV1beta1().Deployments(arkDeploy.Namespace).Patch(arkDeploy.Name, types.StrategicMergePatchType, patchBytes)
_, err = kubeClient.AppsV1beta1().Deployments(arkDeploy.Namespace).Patch(arkDeploy.Name, types.MergePatchType, patchBytes)
cmd.CheckError(err)
},
}

View File

@@ -0,0 +1,66 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package repo
import (
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/cmd/util/output"
)
func NewGetCommand(f client.Factory, use string) *cobra.Command {
var listOptions metav1.ListOptions
c := &cobra.Command{
Use: use,
Short: "Get restic repositories",
Run: func(c *cobra.Command, args []string) {
err := output.ValidateFlags(c)
cmd.CheckError(err)
arkClient, err := f.Client()
cmd.CheckError(err)
var repos *api.ResticRepositoryList
if len(args) > 0 {
repos = new(api.ResticRepositoryList)
for _, name := range args {
repo, err := arkClient.Ark().ResticRepositories(f.Namespace()).Get(name, metav1.GetOptions{})
cmd.CheckError(err)
repos.Items = append(repos.Items, *repo)
}
} else {
repos, err = arkClient.ArkV1().ResticRepositories(f.Namespace()).List(listOptions)
cmd.CheckError(err)
}
_, err = output.PrintWithFormat(c, repos)
cmd.CheckError(err)
},
}
c.Flags().StringVarP(&listOptions.LabelSelector, "selector", "l", listOptions.LabelSelector, "only show items matching this label selector")
output.BindFlags(c.Flags())
return c
}

View File

@@ -0,0 +1,37 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package repo
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
)
func NewRepositoryCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{
Use: "repo",
Short: "Work with restic repositories",
Long: "Work with restic repositories",
}
c.AddCommand(
NewGetCommand(f, "get"),
)
return c
}

View File

@@ -0,0 +1,39 @@
/*
Copyright 2018 the Heptio Ark contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restic
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd/cli/restic/repo"
)
func NewCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{
Use: "restic",
Short: "Work with restic",
Long: "Work with restic",
}
c.AddCommand(
repo.NewRepositoryCommand(f),
NewServerCommand(f),
)
return c
}
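With restic.NewCommand(f) wired into the root command (shown earlier in this diff), listing repositories from the CLI presumably looks like:
ark restic repo get
using the standard --selector and output-format flags bound in the get command above.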

View File

@@ -0,0 +1,176 @@
package restic
import (
"context"
"fmt"
"os"
"strings"
"sync"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeinformers "k8s.io/client-go/informers"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"github.com/heptio/ark/pkg/buildinfo"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/cmd/util/signals"
"github.com/heptio/ark/pkg/controller"
clientset "github.com/heptio/ark/pkg/generated/clientset/versioned"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/util/logging"
)
func NewServerCommand(f client.Factory) *cobra.Command {
var logLevelFlag = logging.LogLevelFlag(logrus.InfoLevel)
var command = &cobra.Command{
Use: "server",
Short: "Run the ark restic server",
Long: "Run the ark restic server",
Run: func(c *cobra.Command, args []string) {
logLevel := logLevelFlag.Parse()
logrus.Infof("Setting log-level to %s", strings.ToUpper(logLevel.String()))
logger := logging.DefaultLogger(logLevel)
logger.Infof("Starting Ark restic server %s", buildinfo.FormattedGitSHA())
s, err := newResticServer(logger, fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()))
cmd.CheckError(err)
s.run()
},
}
command.Flags().Var(logLevelFlag, "log-level", fmt.Sprintf("the level at which to log. Valid values are %s.", strings.Join(logLevelFlag.AllowedValues(), ", ")))
return command
}
type resticServer struct {
kubeClient kubernetes.Interface
arkClient clientset.Interface
arkInformerFactory informers.SharedInformerFactory
kubeInformerFactory kubeinformers.SharedInformerFactory
podInformer cache.SharedIndexInformer
secretInformer cache.SharedIndexInformer
logger logrus.FieldLogger
ctx context.Context
cancelFunc context.CancelFunc
}
func newResticServer(logger logrus.FieldLogger, baseName string) (*resticServer, error) {
clientConfig, err := client.Config("", "", baseName)
if err != nil {
return nil, err
}
kubeClient, err := kubernetes.NewForConfig(clientConfig)
if err != nil {
return nil, errors.WithStack(err)
}
arkClient, err := clientset.NewForConfig(clientConfig)
if err != nil {
return nil, errors.WithStack(err)
}
// use a stand-alone pod informer because we want to use a field selector to
// filter to only pods scheduled on this node.
podInformer := corev1informers.NewFilteredPodInformer(
kubeClient,
metav1.NamespaceAll,
0,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
func(opts *metav1.ListOptions) {
opts.FieldSelector = fmt.Sprintf("spec.nodeName=%s", os.Getenv("NODE_NAME"))
},
)
// use a stand-alone secrets informer so we can filter to only the restic credentials
// secret(s) within the heptio-ark namespace
//
// note: using an informer to access the single secret for all ark-managed
// restic repositories is overkill for now, but will be useful when we move
// to fully-encrypted backups and have unique keys per repository.
secretInformer := corev1informers.NewFilteredSecretInformer(
kubeClient,
os.Getenv("HEPTIO_ARK_NAMESPACE"),
0,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
func(opts *metav1.ListOptions) {
opts.FieldSelector = fmt.Sprintf("metadata.name=%s", restic.CredentialsSecretName)
},
)
ctx, cancelFunc := context.WithCancel(context.Background())
return &resticServer{
kubeClient: kubeClient,
arkClient: arkClient,
arkInformerFactory: informers.NewFilteredSharedInformerFactory(arkClient, 0, os.Getenv("HEPTIO_ARK_NAMESPACE"), nil),
kubeInformerFactory: kubeinformers.NewSharedInformerFactory(kubeClient, 0),
podInformer: podInformer,
secretInformer: secretInformer,
logger: logger,
ctx: ctx,
cancelFunc: cancelFunc,
}, nil
}
func (s *resticServer) run() {
signals.CancelOnShutdown(s.cancelFunc, s.logger)
s.logger.Info("Starting controllers")
var wg sync.WaitGroup
backupController := controller.NewPodVolumeBackupController(
s.logger,
s.arkInformerFactory.Ark().V1().PodVolumeBackups(),
s.arkClient.ArkV1(),
s.podInformer,
s.secretInformer,
s.kubeInformerFactory.Core().V1().PersistentVolumeClaims(),
os.Getenv("NODE_NAME"),
)
wg.Add(1)
go func() {
defer wg.Done()
backupController.Run(s.ctx, 1)
}()
restoreController := controller.NewPodVolumeRestoreController(
s.logger,
s.arkInformerFactory.Ark().V1().PodVolumeRestores(),
s.arkClient.ArkV1(),
s.podInformer,
s.secretInformer,
s.kubeInformerFactory.Core().V1().PersistentVolumeClaims(),
os.Getenv("NODE_NAME"),
)
wg.Add(1)
go func() {
defer wg.Done()
restoreController.Run(s.ctx, 1)
}()
go s.arkInformerFactory.Start(s.ctx.Done())
go s.kubeInformerFactory.Start(s.ctx.Done())
go s.podInformer.Run(s.ctx.Done())
go s.secretInformer.Run(s.ctx.Done())
s.logger.Info("Controllers started successfully")
<-s.ctx.Done()
s.logger.Info("Waiting for all controllers to shut down gracefully")
wg.Wait()
}

View File

@@ -18,6 +18,7 @@ package restore
import (
"fmt"
"os"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -26,10 +27,14 @@ import (
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/cmd/util/output"
"github.com/heptio/ark/pkg/restic"
)
func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
var listOptions metav1.ListOptions
var (
listOptions metav1.ListOptions
volumeDetails bool
)
c := &cobra.Command{
Use: use + " [NAME1] [NAME2] [NAME...]",
@@ -53,7 +58,13 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
first := true
for _, restore := range restores.Items {
s := output.DescribeRestore(&restore, arkClient)
opts := restic.NewPodVolumeRestoreListOptions(restore.Name, string(restore.UID))
podvolumeRestoreList, err := arkClient.ArkV1().PodVolumeRestores(f.Namespace()).List(opts)
if err != nil {
fmt.Fprintf(os.Stderr, "error getting PodVolumeRestores for restore %s: %v\n", restore.Name, err)
}
s := output.DescribeRestore(&restore, podvolumeRestoreList.Items, volumeDetails, arkClient)
if first {
first = false
fmt.Print(s)
@@ -66,6 +77,7 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command {
}
c.Flags().StringVarP(&listOptions.LabelSelector, "selector", "l", listOptions.LabelSelector, "only show items matching this label selector")
c.Flags().BoolVar(&volumeDetails, "volume-details", volumeDetails, "display details of restic volume restores")
return c
}

View File

@@ -20,6 +20,7 @@ import (
"os"
"time"
"github.com/pkg/errors"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -82,6 +83,12 @@ func (l *LogsOptions) Validate(f client.Factory) error {
}
l.client = c
_, err = l.client.ArkV1().Restores(f.Namespace()).Get(l.RestoreName, metav1.GetOptions{})
return err
r, err := l.client.ArkV1().Restores(f.Namespace()).Get(l.RestoreName, metav1.GetOptions{})
if err != nil {
return err
}
if r.Status.Phase != v1.RestorePhaseCompleted {
return errors.Errorf("unable to retrieve logs because restore is not complete")
}
return nil
}

View File

@@ -22,40 +22,20 @@ import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/backup"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/cloudprovider/aws"
"github.com/heptio/ark/pkg/cloudprovider/azure"
"github.com/heptio/ark/pkg/cloudprovider/gcp"
"github.com/heptio/ark/pkg/cmd"
arkdiscovery "github.com/heptio/ark/pkg/discovery"
arkplugin "github.com/heptio/ark/pkg/plugin"
"github.com/heptio/ark/pkg/restore"
)
func NewCommand() *cobra.Command {
func NewCommand(f client.Factory) *cobra.Command {
logger := arkplugin.NewLogger()
objectStores := map[string]cloudprovider.ObjectStore{
"aws": aws.NewObjectStore(),
"gcp": gcp.NewObjectStore(),
"azure": azure.NewObjectStore(),
}
blockStores := map[string]cloudprovider.BlockStore{
"aws": aws.NewBlockStore(),
"gcp": gcp.NewBlockStore(logger),
"azure": azure.NewBlockStore(),
}
backupItemActions := map[string]backup.ItemAction{
"pv": backup.NewBackupPVAction(logger),
"pod": backup.NewPodAction(logger),
}
restoreItemActions := map[string]restore.ItemAction{
"job": restore.NewJobAction(logger),
"pod": restore.NewPodAction(logger),
"svc": restore.NewServiceAction(logger),
}
c := &cobra.Command{
Use: "run-plugin [KIND] [NAME]",
Hidden: true,
@@ -72,18 +52,24 @@ func NewCommand() *cobra.Command {
GRPCServer: plugin.DefaultGRPCServer,
}
logger.Debugf("Executing run-plugin command")
logger.Debug("Executing run-plugin command")
switch kind {
case "cloudprovider":
objectStore, found := objectStores[name]
if !found {
logger.Fatalf("Unrecognized plugin name")
}
var (
objectStore cloudprovider.ObjectStore
blockStore cloudprovider.BlockStore
)
blockStore, found := blockStores[name]
if !found {
logger.Fatalf("Unrecognized plugin name")
switch name {
case "aws":
objectStore, blockStore = aws.NewObjectStore(), aws.NewBlockStore()
case "azure":
objectStore, blockStore = azure.NewObjectStore(), azure.NewBlockStore()
case "gcp":
objectStore, blockStore = gcp.NewObjectStore(), gcp.NewBlockStore(logger)
default:
logger.Fatal("Unrecognized plugin name")
}
serveConfig.Plugins = map[string]plugin.Plugin{
@@ -91,25 +77,53 @@ func NewCommand() *cobra.Command {
string(arkplugin.PluginKindBlockStore): arkplugin.NewBlockStorePlugin(blockStore),
}
case arkplugin.PluginKindBackupItemAction.String():
action, found := backupItemActions[name]
if !found {
logger.Fatalf("Unrecognized plugin name")
var action backup.ItemAction
switch name {
case "pv":
action = backup.NewBackupPVAction(logger)
case "pod":
action = backup.NewPodAction(logger)
case "serviceaccount":
clientset, err := f.KubeClient()
cmd.CheckError(err)
discoveryHelper, err := arkdiscovery.NewHelper(clientset.Discovery(), logger)
cmd.CheckError(err)
action, err = backup.NewServiceAccountAction(
logger,
backup.NewClusterRoleBindingListerMap(clientset),
discoveryHelper)
cmd.CheckError(err)
default:
logger.Fatal("Unrecognized plugin name")
}
serveConfig.Plugins = map[string]plugin.Plugin{
kind: arkplugin.NewBackupItemActionPlugin(action),
}
case arkplugin.PluginKindRestoreItemAction.String():
action, found := restoreItemActions[name]
if !found {
logger.Fatalf("Unrecognized plugin name")
var action restore.ItemAction
switch name {
case "job":
action = restore.NewJobAction(logger)
case "pod":
action = restore.NewPodAction(logger)
case "svc":
action = restore.NewServiceAction(logger)
case "restic":
action = restore.NewResticRestoreAction(logger)
default:
logger.Fatal("Unrecognized plugin name")
}
serveConfig.Plugins = map[string]plugin.Plugin{
kind: arkplugin.NewRestoreItemActionPlugin(action),
}
default:
logger.Fatalf("Unsupported plugin kind")
logger.Fatal("Unsupported plugin kind")
}
plugin.Serve(serveConfig)

View File

@@ -21,54 +21,65 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"reflect"
"sort"
"strings"
"sync"
"time"
"github.com/heptio/ark/pkg/buildinfo"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
kubeerrs "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
kcorev1client "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/backup"
"github.com/heptio/ark/pkg/buildinfo"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/cmd/util/flag"
"github.com/heptio/ark/pkg/cmd/util/signals"
"github.com/heptio/ark/pkg/controller"
arkdiscovery "github.com/heptio/ark/pkg/discovery"
clientset "github.com/heptio/ark/pkg/generated/clientset/versioned"
arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
"github.com/heptio/ark/pkg/metrics"
"github.com/heptio/ark/pkg/plugin"
"github.com/heptio/ark/pkg/podexec"
"github.com/heptio/ark/pkg/restic"
"github.com/heptio/ark/pkg/restore"
"github.com/heptio/ark/pkg/util/kube"
"github.com/heptio/ark/pkg/util/logging"
"github.com/heptio/ark/pkg/util/stringslice"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const (
// the default address on which prometheus metrics are exposed
defaultMetricsAddress = ":8085"
)
func NewCommand() *cobra.Command {
var (
sortedLogLevels = getSortedLogLevels()
logLevelFlag = flag.NewEnum(logrus.InfoLevel.String(), sortedLogLevels...)
pluginDir = "/plugins"
logLevelFlag = logging.LogLevelFlag(logrus.InfoLevel)
pluginDir = "/plugins"
metricsAddress = defaultMetricsAddress
)
var command = &cobra.Command{
@@ -76,19 +87,17 @@ func NewCommand() *cobra.Command {
Short: "Run the ark server",
Long: "Run the ark server",
Run: func(c *cobra.Command, args []string) {
logLevel := logrus.InfoLevel
// go-plugin uses log.Println to log when it's waiting for all plugin
// processes to complete, so we need to set its output to stdout.
log.SetOutput(os.Stdout)
if parsed, err := logrus.ParseLevel(logLevelFlag.String()); err == nil {
logLevel = parsed
} else {
// This should theoretically never happen assuming the enum flag
// is constructed correctly because the enum flag will not allow
// an invalid value to be set.
logrus.Errorf("log-level flag has invalid value %s", strings.ToUpper(logLevelFlag.String()))
}
logLevel := logLevelFlag.Parse()
// Make sure we log to stdout so cloud log dashboards don't show this as an error.
logrus.SetOutput(os.Stdout)
logrus.Infof("setting log-level to %s", strings.ToUpper(logLevel.String()))
logger := newLogger(logLevel, &logging.ErrorLocationHook{}, &logging.LogLocationHook{})
// Ark's DefaultLogger logs to stdout, so all is good there.
logger := logging.DefaultLogger(logLevel)
logger.Infof("Starting Ark server %s", buildinfo.FormattedGitSHA())
// NOTE: the namespace flag is bound to ark's persistent flags when the root ark command
@@ -105,16 +114,16 @@ func NewCommand() *cobra.Command {
}
namespace := getServerNamespace(namespaceFlag)
s, err := newServer(namespace, fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()), pluginDir, logger)
s, err := newServer(namespace, fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()), pluginDir, metricsAddress, logger)
cmd.CheckError(err)
cmd.CheckError(s.run())
},
}
command.Flags().Var(logLevelFlag, "log-level", fmt.Sprintf("the level at which to log. Valid values are %s.", strings.Join(sortedLogLevels, ", ")))
command.Flags().Var(logLevelFlag, "log-level", fmt.Sprintf("the level at which to log. Valid values are %s.", strings.Join(logLevelFlag.AllowedValues(), ", ")))
command.Flags().StringVar(&pluginDir, "plugin-dir", pluginDir, "directory containing Ark plugins")
command.Flags().StringVar(&metricsAddress, "metrics-address", metricsAddress, "the address on which to expose prometheus metrics")
return command
}
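This hunk doesn't show where metricsAddress is consumed; presumably the server registers the promhttp handler on it during startup. A sketch of that wiring, using the net/http and promhttp imports added above (the real code may differ):
// Sketch only.
func (s *server) serveMetrics() {
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	go func() {
		s.logger.Fatal(http.ListenAndServe(s.metricsAddress, mux))
	}()
}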
@@ -133,54 +142,28 @@ func getServerNamespace(namespaceFlag *pflag.Flag) string {
return api.DefaultNamespace
}
func newLogger(level logrus.Level, hooks ...logrus.Hook) *logrus.Logger {
logger := logrus.New()
logger.Level = level
for _, hook := range hooks {
logger.Hooks.Add(hook)
}
return logger
}
// getSortedLogLevels returns a string slice containing all of the valid logrus
// log levels (based on logrus.AllLevels), sorted in ascending order of severity.
func getSortedLogLevels() []string {
var (
sortedLogLevels = make([]logrus.Level, len(logrus.AllLevels))
logLevelsStrings []string
)
copy(sortedLogLevels, logrus.AllLevels)
// logrus.Panic has the lowest value, so the compare function uses ">"
sort.Slice(sortedLogLevels, func(i, j int) bool { return sortedLogLevels[i] > sortedLogLevels[j] })
for _, level := range sortedLogLevels {
logLevelsStrings = append(logLevelsStrings, level.String())
}
return logLevelsStrings
}
type server struct {
namespace string
metricsAddress string
kubeClientConfig *rest.Config
kubeClient kubernetes.Interface
arkClient clientset.Interface
objectStore cloudprovider.ObjectStore
backupService cloudprovider.BackupService
snapshotService cloudprovider.SnapshotService
discoveryClient discovery.DiscoveryInterface
clientPool dynamic.ClientPool
discoveryHelper arkdiscovery.Helper
sharedInformerFactory informers.SharedInformerFactory
ctx context.Context
cancelFunc context.CancelFunc
logger logrus.FieldLogger
pluginManager plugin.Manager
resticManager restic.RepositoryManager
metrics *metrics.ServerMetrics
}
func newServer(namespace, baseName, pluginDir string, logger *logrus.Logger) (*server, error) {
func newServer(namespace, baseName, pluginDir, metricsAddr string, logger *logrus.Logger) (*server, error) {
clientConfig, err := client.Config("", "", baseName)
if err != nil {
return nil, err
@@ -205,6 +188,7 @@ func newServer(namespace, baseName, pluginDir string, logger *logrus.Logger) (*s
s := &server{
namespace: namespace,
metricsAddress: metricsAddr,
kubeClientConfig: clientConfig,
kubeClient: kubeClient,
arkClient: arkClient,
@@ -221,7 +205,23 @@ func newServer(namespace, baseName, pluginDir string, logger *logrus.Logger) (*s
}
func (s *server) run() error {
if err := s.ensureArkNamespace(); err != nil {
defer s.pluginManager.CleanupClients()
signals.CancelOnShutdown(s.cancelFunc, s.logger)
// Since s.namespace, which specifies where backups/restores/schedules/etc. should live,
// *could* be different from the namespace where the Ark server pod runs, check to make
// sure it exists, and fail fast if it doesn't.
if err := s.namespaceExists(s.namespace); err != nil {
return err
}
if err := s.initDiscoveryHelper(); err != nil {
return err
}
// check to ensure all Ark CRDs exist
if err := s.arkResourcesExist(); err != nil {
return err
}
@@ -245,6 +245,12 @@ func (s *server) run() error {
return err
}
if config.BackupStorageProvider.ResticLocation != "" {
if err := s.initRestic(config.BackupStorageProvider); err != nil {
return err
}
}
if err := s.runControllers(config); err != nil {
return err
}
@@ -252,22 +258,78 @@ func (s *server) run() error {
return nil
}
func (s *server) ensureArkNamespace() error {
logContext := s.logger.WithField("namespace", s.namespace)
// namespaceExists returns nil if the namespace can be successfully
// retrieved from the Kubernetes API, or an error otherwise.
func (s *server) namespaceExists(namespace string) error {
s.logger.WithField("namespace", namespace).Info("Checking existence of namespace")
logContext.Info("Ensuring namespace exists for backups")
defaultNamespace := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: s.namespace,
},
if _, err := s.kubeClient.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{}); err != nil {
return errors.WithStack(err)
}
if created, err := kube.EnsureNamespaceExists(&defaultNamespace, s.kubeClient.CoreV1().Namespaces()); created {
logContext.Info("Namespace created")
} else if err != nil {
s.logger.WithField("namespace", namespace).Info("Namespace exists")
return nil
}
// initDiscoveryHelper instantiates the server's discovery helper and spawns a
// goroutine to call Refresh() every 5 minutes.
func (s *server) initDiscoveryHelper() error {
discoveryHelper, err := arkdiscovery.NewHelper(s.discoveryClient, s.logger)
if err != nil {
return err
}
logContext.Info("Namespace already exists")
s.discoveryHelper = discoveryHelper
go wait.Until(
func() {
if err := discoveryHelper.Refresh(); err != nil {
s.logger.WithError(err).Error("Error refreshing discovery")
}
},
5*time.Minute,
s.ctx.Done(),
)
return nil
}
// arkResourcesExist checks for the existence of each Ark CRD via discovery
// and returns an error if any of them don't exist.
func (s *server) arkResourcesExist() error {
s.logger.Info("Checking existence of Ark custom resource definitions")
var arkGroupVersion *metav1.APIResourceList
for _, gv := range s.discoveryHelper.Resources() {
if gv.GroupVersion == api.SchemeGroupVersion.String() {
arkGroupVersion = gv
break
}
}
if arkGroupVersion == nil {
return errors.Errorf("Ark API group %s not found", api.SchemeGroupVersion)
}
foundResources := sets.NewString()
for _, resource := range arkGroupVersion.APIResources {
foundResources.Insert(resource.Kind)
}
var errs []error
for kind := range api.CustomResources() {
if foundResources.Has(kind) {
s.logger.WithField("kind", kind).Debug("Found custom resource")
continue
}
errs = append(errs, errors.Errorf("custom resource %s not found in Ark API group %s", kind, api.SchemeGroupVersion))
}
if len(errs) > 0 {
return kubeerrs.NewAggregate(errs)
}
s.logger.Info("All Ark custom resource definitions exist")
return nil
}
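Editor's note: kubeerrs here is k8s.io/apimachinery/pkg/util/errors. NewAggregate returns nil for an empty slice and otherwise folds every error into one message, which is why missing CRDs are collected and reported together rather than one at a time. A small sketch of that behavior:

    package main

    import (
        "fmt"

        "github.com/pkg/errors"
        kubeerrs "k8s.io/apimachinery/pkg/util/errors"
    )

    func main() {
        var errs []error
        for _, kind := range []string{"Backup", "Restore"} {
            errs = append(errs, errors.Errorf("custom resource %s not found", kind))
        }

        // NewAggregate returns nil for an empty slice, so a caller can
        // unconditionally return kubeerrs.NewAggregate(errs).
        fmt.Println(kubeerrs.NewAggregate(nil))  // <nil>
        fmt.Println(kubeerrs.NewAggregate(errs)) // [custom resource Backup not found, custom resource Restore not found]
    }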
@@ -295,11 +357,21 @@ func (s *server) loadConfig() (*api.Config, error) {
}
const (
defaultGCSyncPeriod              = 60 * time.Minute
defaultBackupSyncPeriod          = 60 * time.Minute
defaultScheduleSyncPeriod        = time.Minute
defaultPodVolumeOperationTimeout = 60 * time.Minute
)
// - Namespaces go first because all namespaced resources depend on them.
// - PVs go before PVCs because PVCs depend on them.
// - PVCs go before pods or controllers so they can be mounted as volumes.
// - Secrets and config maps go before pods or controllers so they can be mounted
// as volumes.
// - Service accounts go before pods or controllers so pods can use them.
// - Limit ranges go before pods or controllers so pods can use them.
// - Pods go before controllers so they can be explicitly restored and potentially
// have restic restores run before controllers adopt the pods.
var defaultResourcePriorities = []string{
"namespaces",
"persistentvolumes",
@@ -308,6 +380,7 @@ var defaultResourcePriorities = []string{
"configmaps",
"serviceaccounts",
"limitranges",
"pods",
}
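Editor's note: the priority list is consumed by the restorer, which restores prioritized resources first, in list order, with remaining resources following. A minimal illustration of that ordering rule (illustrative only, not the restorer's actual implementation; here the remainder is sorted alphabetically for determinism):

    package main

    import (
        "fmt"
        "sort"
    )

    // sortByPriority orders resources so entries found in priorities come
    // first, in priority order; everything else follows alphabetically.
    func sortByPriority(resources, priorities []string) []string {
        rank := map[string]int{}
        for i, p := range priorities {
            rank[p] = i
        }

        sorted := append([]string(nil), resources...)
        sort.SliceStable(sorted, func(i, j int) bool {
            ri, iOK := rank[sorted[i]]
            rj, jOK := rank[sorted[j]]
            switch {
            case iOK && jOK:
                return ri < rj
            case iOK:
                return true
            case jOK:
                return false
            default:
                return sorted[i] < sorted[j]
            }
        })
        return sorted
    }

    func main() {
        fmt.Println(sortByPriority(
            []string{"deployments", "pods", "namespaces", "services"},
            []string{"namespaces", "pods"},
        ))
        // [namespaces pods deployments services]
    }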
func applyConfigDefaults(c *api.Config, logger logrus.FieldLogger) {
@@ -323,12 +396,25 @@ func applyConfigDefaults(c *api.Config, logger logrus.FieldLogger) {
c.ScheduleSyncPeriod.Duration = defaultScheduleSyncPeriod
}
if c.PodVolumeOperationTimeout.Duration == 0 {
c.PodVolumeOperationTimeout.Duration = defaultPodVolumeOperationTimeout
}
if len(c.ResourcePriorities) == 0 {
c.ResourcePriorities = defaultResourcePriorities
logger.WithField("priorities", c.ResourcePriorities).Info("Using default resource priorities")
} else {
logger.WithField("priorities", c.ResourcePriorities).Info("Using resource priorities from config")
}
if c.BackupStorageProvider.Config == nil {
c.BackupStorageProvider.Config = make(map[string]string)
}
// add the bucket name to the config map so that object stores can use
// it when initializing. The AWS object store uses this to determine the
// bucket's region when setting up its client.
c.BackupStorageProvider.Config["bucket"] = c.BackupStorageProvider.Bucket
}
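Editor's note: on the consuming side, an object store receives this config map through its Init hook, so the injected "bucket" key is available before any client is built. A toy sketch under that assumption (the type and bucket name below are hypothetical):

    package main

    import (
        "fmt"

        "github.com/pkg/errors"
    )

    // toyObjectStore stands in for a provider plugin that, like the AWS
    // object store mentioned above, needs the bucket name while setting
    // up its client (e.g. to look up the bucket's region).
    type toyObjectStore struct {
        bucket string
    }

    func (o *toyObjectStore) Init(config map[string]string) error {
        bucket, ok := config["bucket"]
        if !ok || bucket == "" {
            return errors.New("bucket not found in config")
        }
        o.bucket = bucket
        return nil
    }

    func main() {
        store := &toyObjectStore{}
        err := store.Init(map[string]string{"bucket": "ark-backups"})
        fmt.Println(err, store.bucket) // <nil> ark-backups
    }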
// watchConfig adds an update event handler to the Config shared informer, invoking s.cancelFunc
@@ -371,6 +457,7 @@ func (s *server) initBackupService(config *api.Config) error {
return err
}
s.objectStore = objectStore
s.backupService = cloudprovider.NewBackupService(objectStore, s.logger)
return nil
}
@@ -431,6 +518,59 @@ func durationMin(a, b time.Duration) time.Duration {
return b
}
func (s *server) initRestic(config api.ObjectStorageProviderConfig) error {
// warn if restic daemonset does not exist
if _, err := s.kubeClient.AppsV1().DaemonSets(s.namespace).Get(restic.DaemonSet, metav1.GetOptions{}); apierrors.IsNotFound(err) {
s.logger.Warn("Ark restic daemonset not found; restic backups/restores will not work until it's created")
} else if err != nil {
s.logger.WithError(errors.WithStack(err)).Warn("Error checking for existence of ark restic daemonset")
}
// ensure the repo key secret is set up
if err := restic.EnsureCommonRepositoryKey(s.kubeClient.CoreV1(), s.namespace); err != nil {
return err
}
// set the env vars that restic uses for creds purposes
if config.Name == string(restic.AzureBackend) {
os.Setenv("AZURE_ACCOUNT_NAME", os.Getenv("AZURE_STORAGE_ACCOUNT_ID"))
os.Setenv("AZURE_ACCOUNT_KEY", os.Getenv("AZURE_STORAGE_KEY"))
}
// use a stand-alone secrets informer so we can filter to only the restic credentials
// secret(s) within the heptio-ark namespace
//
// note: using an informer to access the single secret for all ark-managed
// restic repositories is overkill for now, but will be useful when we move
// to fully-encrypted backups and have unique keys per repository.
secretsInformer := corev1informers.NewFilteredSecretInformer(
s.kubeClient,
s.namespace,
0,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
func(opts *metav1.ListOptions) {
opts.FieldSelector = fmt.Sprintf("metadata.name=%s", restic.CredentialsSecretName)
},
)
go secretsInformer.Run(s.ctx.Done())
res, err := restic.NewRepositoryManager(
s.ctx,
s.namespace,
s.arkClient,
secretsInformer,
s.sharedInformerFactory.Ark().V1().ResticRepositories(),
s.arkClient.ArkV1(),
s.logger,
)
if err != nil {
return err
}
s.resticManager = res
return nil
}
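Editor's note: the tweak-options func passed to NewFilteredSecretInformer is what keeps the informer's cache to (at most) the one credentials secret; the same pattern works for any generated informer. A stand-alone sketch (the secret name is illustrative; against a real API server the field selector filters server-side, while the fake clientset used here does not reliably apply it, so this is a shape demo only):

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        corev1informers "k8s.io/client-go/informers/core/v1"
        "k8s.io/client-go/kubernetes/fake"
        "k8s.io/client-go/tools/cache"
    )

    func main() {
        client := fake.NewSimpleClientset()
        stopCh := make(chan struct{})
        defer close(stopCh)

        // Only objects matching the field selector enter the store, so the
        // cache holds at most the one named secret rather than every
        // secret in the namespace.
        informer := corev1informers.NewFilteredSecretInformer(
            client,
            "heptio-ark",
            0, // resync disabled
            cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
            func(opts *metav1.ListOptions) {
                opts.FieldSelector = "metadata.name=ark-restic-credentials"
            },
        )

        go informer.Run(stopCh)
        cache.WaitForCacheSync(stopCh, informer.HasSynced)
        fmt.Println("synced; cached objects:", len(informer.GetStore().List()))
    }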
func (s *server) runControllers(config *api.Config) error {
s.logger.Info("Starting controllers")
@@ -446,11 +586,23 @@ func (s *server) runControllers(config *api.Config) error {
s.logger,
)
go func() {
metricsMux := http.NewServeMux()
metricsMux.Handle("/metrics", promhttp.Handler())
s.logger.Infof("Starting metric server at address [%s]", s.metricsAddress)
if err := http.ListenAndServe(s.metricsAddress, metricsMux); err != nil {
s.logger.Fatalf("Failed to start metric server at [%s]: %v", s.metricsAddress, err)
}
}()
s.metrics = metrics.NewServerMetrics()
s.metrics.RegisterAllMetrics()
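Editor's note: the metrics wiring is plain client_golang: a dedicated mux so the listener serves nothing but /metrics, and registration before the first scrape, with series initialized at zero up front to avoid missing-metric gaps. A self-contained sketch with a stand-in counter (the metric name, label value, and address are illustrative, not Ark's):

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // Register before serving so the first scrape sees the metric, and
        // pre-create the labeled series so it is never missing.
        backupAttempts := prometheus.NewCounterVec(
            prometheus.CounterOpts{
                Name: "ark_backup_attempt_total",
                Help: "Total number of backup attempts (illustrative name).",
            },
            []string{"schedule"},
        )
        prometheus.MustRegister(backupAttempts)
        backupAttempts.WithLabelValues("daily-backup").Add(0)

        // A dedicated mux keeps the metrics listener serving nothing else.
        mux := http.NewServeMux()
        mux.Handle("/metrics", promhttp.Handler())
        http.ListenAndServe(":8085", mux)
    }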
backupSyncController := controller.NewBackupSyncController(
s.arkClient.ArkV1(),
s.backupService,
config.BackupStorageProvider.Bucket,
config.BackupSyncPeriod.Duration,
s.namespace,
s.logger,
)
wg.Add(1)
@@ -459,27 +611,21 @@ func (s *server) runControllers(config *api.Config) error {
wg.Done()
}()
-discoveryHelper, err := arkdiscovery.NewHelper(s.discoveryClient, s.logger)
-if err != nil {
-return err
-}
-go wait.Until(
-func() {
-if err := discoveryHelper.Refresh(); err != nil {
-s.logger.WithError(err).Error("Error refreshing discovery")
-}
-},
-5*time.Minute,
-ctx.Done(),
-)
if config.RestoreOnlyMode {
s.logger.Info("Restore only mode - not starting the backup, schedule, delete-backup, or GC controllers")
} else {
backupTracker := controller.NewBackupTracker()
-backupper, err := newBackupper(discoveryHelper, s.clientPool, s.backupService, s.snapshotService, s.kubeClientConfig, s.kubeClient.CoreV1())
+backupper, err := backup.NewKubernetesBackupper(
+s.discoveryHelper,
+client.NewDynamicFactory(s.clientPool),
+podexec.NewPodCommandExecutor(s.kubeClientConfig, s.kubeClient.CoreV1().RESTClient()),
+s.snapshotService,
+s.resticManager,
+config.PodVolumeOperationTimeout.Duration,
+)
cmd.CheckError(err)
backupController := controller.NewBackupController(
s.sharedInformerFactory.Ark().V1().Backups(),
s.arkClient.ArkV1(),
@@ -490,6 +636,7 @@ func (s *server) runControllers(config *api.Config) error {
s.logger,
s.pluginManager,
backupTracker,
s.metrics,
)
wg.Add(1)
go func() {
@@ -504,6 +651,7 @@ func (s *server) runControllers(config *api.Config) error {
s.sharedInformerFactory.Ark().V1().Schedules(),
config.ScheduleSyncPeriod.Duration,
s.logger,
s.metrics,
)
wg.Add(1)
go func() {
@@ -514,6 +662,7 @@ func (s *server) runControllers(config *api.Config) error {
gcController := controller.NewGCController(
s.logger,
s.sharedInformerFactory.Ark().V1().Backups(),
s.sharedInformerFactory.Ark().V1().DeleteBackupRequests(),
s.arkClient.ArkV1(),
config.GCSyncPeriod.Duration,
)
@@ -534,6 +683,8 @@ func (s *server) runControllers(config *api.Config) error {
s.sharedInformerFactory.Ark().V1().Restores(),
s.arkClient.ArkV1(), // restoreClient
backupTracker,
s.resticManager,
s.sharedInformerFactory.Ark().V1().PodVolumeBackups(),
)
wg.Add(1)
go func() {
@@ -543,14 +694,16 @@ func (s *server) runControllers(config *api.Config) error {
}
-restorer, err := newRestorer(
-discoveryHelper,
-s.clientPool,
+restorer, err := restore.NewKubernetesRestorer(
+s.discoveryHelper,
+client.NewDynamicFactory(s.clientPool),
s.backupService,
s.snapshotService,
config.ResourcePriorities,
s.arkClient.ArkV1(),
-s.kubeClient,
+s.kubeClient.CoreV1().Namespaces(),
+s.resticManager,
+config.PodVolumeOperationTimeout.Duration,
s.logger,
)
cmd.CheckError(err)
@@ -588,6 +741,23 @@ func (s *server) runControllers(config *api.Config) error {
wg.Done()
}()
if s.resticManager != nil {
resticRepoController := controller.NewResticRepositoryController(
s.logger,
s.sharedInformerFactory.Ark().V1().ResticRepositories(),
s.arkClient.ArkV1(),
config.BackupStorageProvider,
s.resticManager,
)
wg.Add(1)
go func() {
// TODO only having a single worker may be an issue since maintenance
// can take a long time.
resticRepoController.Run(ctx, 1)
wg.Done()
}()
}
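Editor's note: on the TODO above, the worker count is simply the number of goroutines draining the controller's work queue, so a long maintenance run with one worker blocks every other repository. A generic sketch of the worker pattern (not Ark's controller code):

    package main

    import (
        "fmt"
        "sync"
        "time"

        "k8s.io/client-go/util/workqueue"
    )

    func main() {
        queue := workqueue.New()
        for i := 0; i < 5; i++ {
            queue.Add(fmt.Sprintf("repo-%d", i))
        }

        var wg sync.WaitGroup
        // With numWorkers = 1, one slow maintenance job stalls every other
        // repository; extra workers let items be processed in parallel.
        const numWorkers = 2
        for i := 0; i < numWorkers; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for {
                    key, shutdown := queue.Get()
                    if shutdown {
                        return
                    }
                    time.Sleep(100 * time.Millisecond) // stand-in for maintenance
                    fmt.Println("processed", key)
                    queue.Done(key)
                }
            }()
        }

        // In the real server, shutdown is driven by ctx.Done(); here we
        // just drain the queue and stop.
        for queue.Len() > 0 {
            time.Sleep(50 * time.Millisecond)
        }
        queue.ShutDown()
        wg.Wait()
    }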
// SHARED INFORMERS HAVE TO BE STARTED AFTER ALL CONTROLLERS
go s.sharedInformerFactory.Start(ctx.Done())
@@ -643,41 +813,3 @@ func (s *server) removeDeprecatedGCFinalizer() {
}
}
}
func newBackupper(
discoveryHelper arkdiscovery.Helper,
clientPool dynamic.ClientPool,
backupService cloudprovider.BackupService,
snapshotService cloudprovider.SnapshotService,
kubeClientConfig *rest.Config,
kubeCoreV1Client kcorev1client.CoreV1Interface,
) (backup.Backupper, error) {
return backup.NewKubernetesBackupper(
discoveryHelper,
client.NewDynamicFactory(clientPool),
backup.NewPodCommandExecutor(kubeClientConfig, kubeCoreV1Client.RESTClient()),
snapshotService,
)
}
func newRestorer(
discoveryHelper arkdiscovery.Helper,
clientPool dynamic.ClientPool,
backupService cloudprovider.BackupService,
snapshotService cloudprovider.SnapshotService,
resourcePriorities []string,
backupClient arkv1client.BackupsGetter,
kubeClient kubernetes.Interface,
logger logrus.FieldLogger,
) (restore.Restorer, error) {
return restore.NewKubernetesRestorer(
discoveryHelper,
client.NewDynamicFactory(clientPool),
backupService,
snapshotService,
resourcePriorities,
backupClient,
kubeClient.CoreV1().Namespaces(),
logger,
)
}
