Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-31 17:12:07 +00:00)
Compare commits
916 Commits
34 .github/ISSUE_TEMPLATE/bug_report.md (vendored, normal file)
@@ -0,0 +1,34 @@
---
name: Bug report
about: Tell us about a problem you are experiencing

---

**What steps did you take and what happened:**
[A clear and concise description of what the bug is, and what commands you ran.]


**What did you expect to happen:**


**The output of the following commands will help us better understand what's going on**:
(Pasting long output into a [GitHub gist](https://gist.github.com) or other pastebin is fine.)

* `kubectl logs deployment/ark -n heptio-ark`
* `ark backup describe <backupname>` or `kubectl get backup/<backupname> -n heptio-ark -o yaml`
* `ark backup logs <backupname>`
* `ark restore describe <restorename>` or `kubectl get restore/<restorename> -n heptio-ark -o yaml`
* `ark restore logs <restorename>`


**Anything else you would like to add:**
[Miscellaneous information that will assist in solving the issue.]


**Environment:**

- Ark version (use `ark version`):
- Kubernetes version (use `kubectl version`):
- Kubernetes installer & version:
- Cloud provider or hardware configuration:
- OS (e.g. from `/etc/os-release`):
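The template asks reporters to run several `ark` and `kubectl` commands; a minimal convenience sketch of capturing that output into files for attachment follows. It is not part of the template: the output file names are illustrative, and `<backupname>`/`<restorename>` must be replaced with real object names.

```sh
# Hypothetical helper: gather the diagnostics the bug template asks for.
# Replace <backupname>/<restorename> with real object names before running.
kubectl logs deployment/ark -n heptio-ark > ark-server.log
ark backup describe <backupname> > backup-describe.txt
ark backup logs <backupname> > backup.log
ark restore describe <restorename> > restore-describe.txt
ark restore logs <restorename> > restore.log
```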
21 .github/ISSUE_TEMPLATE/feature-enhancement-request.md (vendored, normal file)
@@ -0,0 +1,21 @@
---
name: Feature enhancement request
about: Suggest an idea for this project

---

**Describe the solution you'd like**
[A clear and concise description of what you want to happen.]


**Anything else you would like to add:**
[Miscellaneous information that will assist in solving the issue.]


**Environment:**

- Ark version (use `ark version`):
- Kubernetes version (use `kubectl version`):
- Kubernetes installer & version:
- Cloud provider or hardware configuration:
- OS (e.g. from `/etc/os-release`):
9 .gitignore (vendored)
@@ -7,6 +7,7 @@
_obj
_test
_output
config

# Architecture specific extensions/prefixes
*.[568vq]
@@ -32,3 +33,11 @@ debug
.container-*
.vimrc
.go
.DS_Store
.push-*
.vscode
*.diff

_site/

.vs
54 .goreleaser.yml (normal file)
@@ -0,0 +1,54 @@
# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

dist: _output
before:
  hooks:
    - ./hack/set-example-tags.sh
builds:
  - main: ./cmd/ark/main.go
    env:
      - CGO_ENABLED=0
    goos:
      - linux
      - darwin
      - windows
    goarch:
      - amd64
      - arm
      - arm64
    ignore:
      # don't build arm/arm64 for darwin or windows
      - goos: darwin
        goarch: arm
      - goos: darwin
        goarch: arm64
      - goos: windows
        goarch: arm
      - goos: windows
        goarch: arm64
    ldflags:
      - -X "github.com/heptio/ark/pkg/buildinfo.Version={{ .Tag }}" -X "github.com/heptio/ark/pkg/buildinfo.GitSHA={{ .FullCommit }}" -X "github.com/heptio/ark/pkg/buildinfo.GitTreeState={{ .Env.GIT_TREE_STATE }}"
archive:
  name_template: "{{ .ProjectName }}-{{ .Tag }}-{{ .Os }}-{{ .Arch }}"
  files:
    - LICENSE
    - config/**/*
checksum:
  name_template: 'CHECKSUM'
release:
  github:
    owner: heptio
    name: ark
  draft: true
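The `ldflags` entry above stamps each release binary with version metadata at link time. A rough, hand-built equivalent for a single build is sketched below; the tag `v0.10.0`, SHA `abc1234`, and tree state `clean` are placeholder values, not taken from this page.

```sh
# Illustrative manual equivalent of the goreleaser ldflags (placeholder values).
go build -o _output/ark \
  -ldflags "-X github.com/heptio/ark/pkg/buildinfo.Version=v0.10.0 -X github.com/heptio/ark/pkg/buildinfo.GitSHA=abc1234 -X github.com/heptio/ark/pkg/buildinfo.GitTreeState=clean" \
  ./cmd/ark/main.go
```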
11 .travis.yml (normal file)
@@ -0,0 +1,11 @@
language: go

go:
  - 1.11.x

sudo: required

services:
  - docker

script: make ci
85 CHANGELOG.md
@@ -1,60 +1,35 @@
# Changelog
## Development release:

#### [v0.5.0](https://github.com/heptio/ark/tree/v0.5.0) - 2017-10-26
Breaking changes:
* The backup tar file format has changed. Backups created using previous versions of Ark cannot be restored using v0.5.0.
* When backing up one or more specific namespaces, cluster-scoped resources are no longer backed up by default, with the exception of PVs that are used within the target namespace(s). Cluster-scoped resources can still be included by explicitly specifying `--include-cluster-resources`.
* [Unreleased Changes][9]

New features:
* Add customized user-agent string for Ark CLI
* Switch from glog to logrus
* Exclude nodes from restoration
* Add a FAQ
* Record PV availability zone and use it when restoring volumes from snapshots
* Back up the PV associated with a PVC
* Add `--include-cluster-resources` flag to `ark backup create`
* Add `--include-cluster-resources` flag to `ark restore create`
* Properly support resource restore priorities across cluster-scoped and namespace-scoped resources
* Support `ark create ...` and `ark get ...`
* Make ark run as cluster-admin
* Add pod exec backup hooks
* Support cross-compilation & upgrade to go 1.9
### Bug Fixes / Other Changes
* add multizone/regional support to gcp (#765, @wwitzel3)
* Delete spec.priority in pod restore action (#879, @mwieczorek)
* Added brew reference (#1051, @omerlh)
* Update to go 1.11 (#1069, @gliptak)
* Initialize empty schedule metrics on server init (#1054, @cbeneke)
* Update CHANGELOGs (#1063, @wwitzel3)
* Remove default token from all service accounts (#1048, @ncdc)
* Allow to use AWS Signature v1 for creating signed AWS urls (#811, @bashofmann)

Bug fixes:
* Make config change detection more robust
## Current release:
* [CHANGELOG-0.10.md][8]

#### [v0.4.0](https://github.com/heptio/ark/tree/v0.4.0) - 2017-09-14
Breaking changes:
* Snapshotting and restoring volumes is now enabled by default
* The --namespaces flag for 'ark restore create' has been replaced by --include-namespaces and --exclude-namespaces
## Older releases:
* [CHANGELOG-0.9.md][7]
* [CHANGELOG-0.8.md][6]
* [CHANGELOG-0.7.md][5]
* [CHANGELOG-0.6.md][4]
* [CHANGELOG-0.5.md][3]
* [CHANGELOG-0.4.md][2]
* [CHANGELOG-0.3.md][1]

New features:
* Support for S3 SSE with KMS
* Cloud provider configurations are validated at startup
* The persistentVolumeProvider is now optional
* Restore objects are garbage collected
* Each backup now has an associated log file, viewable via 'ark backup logs'
* Each restore now has an associated log file, viewable via 'ark restore logs'
* Add --include-resources/--exclude-resources for restores

Bug fixes:
* Only save/use iops for io1 volumes on AWS
* When restoring, try to retrieve the Backup directly from object storage if it's not found
* When syncing Backups from object storage to Kubernetes, don't return at the first error encountered
* More closely match how kubectl performs kubeconfig resolution
* Increase default Azure API request timeout to 2 minutes
* Update Azure diskURI to match diskName

#### [v0.3.3](https://github.com/heptio/ark/tree/v0.3.3) - 2017-08-10
* Treat the first field in a schedule's cron expression as minutes, not seconds

#### [v0.3.2](https://github.com/heptio/ark/tree/v0.3.2) - 2017-08-07
* Add client-go auth provider plugins for Azure, GCP, OIDC

#### [v0.3.1](https://github.com/heptio/ark/tree/v0.3.1) - 2017-08-03
* Fix Makefile VERSION

#### [v0.3.0](https://github.com/heptio/ark/tree/v0.3.0) - 2017-08-03
* Initial Release

[9]: https://github.com/heptio/ark/blob/master/changelogs/unreleased
[8]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.10.md
[7]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.9.md
[6]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.8.md
[5]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.7.md
[4]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.6.md
[3]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.5.md
[2]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.4.md
[1]: https://github.com/heptio/ark/blob/master/changelogs/CHANGELOG-0.3.md
@@ -1,4 +1,16 @@
# Contributing

## CHANGELOG

Authors are expected to include a changelog file with their pull requests. The changelog file
should be a new file created in the `changelogs/unreleased` folder. The file should follow the
naming convention of `pr-username` and the contents of the file should be your text for the
changelog.

    ark/changelogs/unreleased   <- folder
        000-username            <- file
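A minimal sketch of following that convention, assuming a hypothetical PR number 1234 by GitHub user "alice" (the number, username, and message below are placeholders):

```sh
# Hypothetical example of adding a changelog entry for a pull request.
mkdir -p changelogs/unreleased
echo "Fix volume snapshot ordering during restore" > changelogs/unreleased/1234-alice
git add changelogs/unreleased/1234-alice
```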

## DCO Sign off

All authors to the project retain copyright to their work. However, to ensure
23 Dockerfile-ark-restic-restore-helper.alpine (normal file)
@@ -0,0 +1,23 @@
# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM alpine:3.8

MAINTAINER Steve Kriss <steve@heptio.com>

ADD /bin/linux/amd64/ark-restic-restore-helper .

USER nobody:nobody

ENTRYPOINT [ "/ark-restic-restore-helper" ]
31 Dockerfile-ark.alpine (normal file)
@@ -0,0 +1,31 @@
# Copyright 2017 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM alpine:3.8

MAINTAINER Andy Goldstein <andy@heptio.com>

RUN apk add --no-cache ca-certificates

RUN apk add --update --no-cache bzip2 && \
    wget --quiet https://github.com/restic/restic/releases/download/v0.9.3/restic_0.9.3_linux_amd64.bz2 && \
    bunzip2 restic_0.9.3_linux_amd64.bz2 && \
    mv restic_0.9.3_linux_amd64 /usr/bin/restic && \
    chmod +x /usr/bin/restic

ADD /bin/linux/amd64/ark /ark

USER nobody:nobody

ENTRYPOINT ["/ark"]
22 Dockerfile-fsfreeze-pause.alpine (normal file)
@@ -0,0 +1,22 @@
# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM alpine:3.8

MAINTAINER Wayne Witzel III <wayne@heptio.com>

RUN apk add --no-cache ca-certificates
RUN apk add --update --no-cache busybox util-linux

ENTRYPOINT ["/bin/sh", "-c", "while true; do sleep 10000; done"]
@@ -1,25 +0,0 @@
# Copyright 2017 the Heptio Ark project contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM alpine:3.6

MAINTAINER Andy Goldstein <andy@heptio.com>

RUN apk add --no-cache ca-certificates

ADD /bin/linux/amd64/ark /ark

USER nobody:nobody

ENTRYPOINT ["/ark"]
770 Gopkg.lock (generated)
@@ -3,51 +3,82 @@
|
||||
|
||||
[[projects]]
|
||||
name = "cloud.google.com/go"
|
||||
packages = ["compute/metadata","iam","internal","internal/optional","internal/version","storage"]
|
||||
packages = [
|
||||
"compute/metadata",
|
||||
"iam",
|
||||
"internal",
|
||||
"internal/optional",
|
||||
"internal/version",
|
||||
"storage"
|
||||
]
|
||||
revision = "44bcd0b2078ba5e7fedbeb36808d1ed893534750"
|
||||
version = "v0.11.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Azure/azure-sdk-for-go"
|
||||
packages = ["arm/disk","arm/examples/helpers","arm/resources/subscriptions","storage"]
|
||||
revision = "2d49bb8f2cee530cc16f1f1a9f0aae763dee257d"
|
||||
version = "v10.2.1-beta"
|
||||
packages = [
|
||||
"arm/disk",
|
||||
"services/storage/mgmt/2017-10-01/storage",
|
||||
"storage"
|
||||
]
|
||||
revision = "2d1d76c9013c4feb6695a2346f0e66ea0ef77aa6"
|
||||
version = "v11.3.0-beta"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Azure/go-autorest"
|
||||
packages = ["autorest","autorest/adal","autorest/azure","autorest/date","autorest/to","autorest/validation"]
|
||||
revision = "f6e08fe5e4d45c9a66e40196d3fed5f37331d224"
|
||||
version = "v8.1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/PuerkitoBio/purell"
|
||||
packages = ["."]
|
||||
revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/PuerkitoBio/urlesc"
|
||||
packages = ["."]
|
||||
revision = "de5bf2ad457846296e2031421a34e2568e304e35"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/asaskevich/govalidator"
|
||||
packages = ["."]
|
||||
revision = "4918b99a7cb949bb295f3c7bbaf24b577d806e35"
|
||||
version = "v6"
|
||||
packages = [
|
||||
"autorest",
|
||||
"autorest/adal",
|
||||
"autorest/azure",
|
||||
"autorest/date",
|
||||
"autorest/to",
|
||||
"autorest/validation"
|
||||
]
|
||||
revision = "1ff28809256a84bb6966640ff3d0371af82ccba4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/ec2query","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/ec2","service/s3","service/sts"]
|
||||
revision = "1850f427c33c2558a2118dc55c1cf95a633d7432"
|
||||
version = "v1.10.27"
|
||||
packages = [
|
||||
"aws",
|
||||
"aws/awserr",
|
||||
"aws/awsutil",
|
||||
"aws/client",
|
||||
"aws/client/metadata",
|
||||
"aws/corehandlers",
|
||||
"aws/credentials",
|
||||
"aws/credentials/ec2rolecreds",
|
||||
"aws/credentials/endpointcreds",
|
||||
"aws/credentials/stscreds",
|
||||
"aws/defaults",
|
||||
"aws/ec2metadata",
|
||||
"aws/endpoints",
|
||||
"aws/request",
|
||||
"aws/session",
|
||||
"aws/signer/v4",
|
||||
"internal/sdkio",
|
||||
"internal/sdkrand",
|
||||
"internal/shareddefaults",
|
||||
"private/protocol",
|
||||
"private/protocol/ec2query",
|
||||
"private/protocol/query",
|
||||
"private/protocol/query/queryutil",
|
||||
"private/protocol/rest",
|
||||
"private/protocol/restxml",
|
||||
"private/protocol/xml/xmlutil",
|
||||
"service/ec2",
|
||||
"service/s3",
|
||||
"service/s3/s3iface",
|
||||
"service/s3/s3manager",
|
||||
"service/sts"
|
||||
]
|
||||
revision = "1f8fb9d0919e5a58992207db9512a03f76ab0274"
|
||||
version = "v1.13.12"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/cpuguy83/go-md2man"
|
||||
packages = ["md2man"]
|
||||
revision = "a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa"
|
||||
version = "v1.0.6"
|
||||
branch = "master"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/davecgh/go-spew"
|
||||
@@ -58,38 +89,23 @@
|
||||
[[projects]]
|
||||
name = "github.com/dgrijalva/jwt-go"
|
||||
packages = ["."]
|
||||
revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
|
||||
version = "v3.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/docker/distribution"
|
||||
packages = ["digest","reference"]
|
||||
revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89"
|
||||
version = "v2.6.2"
|
||||
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
|
||||
version = "v3.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/docker/spdystream"
|
||||
packages = [".","spdy"]
|
||||
packages = [
|
||||
".",
|
||||
"spdy"
|
||||
]
|
||||
revision = "bc6354cbbc295e925e4c611ffe90c1f287ee54db"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/emicklei/go-restful"
|
||||
packages = [".","log"]
|
||||
revision = "68c9750c36bb8cb433f1b88c807b4b30df4acc40"
|
||||
version = "v2.2.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/emicklei/go-restful-swagger12"
|
||||
packages = ["."]
|
||||
revision = "dcef7f55730566d41eae5db10e7d6981829720f6"
|
||||
version = "1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/fatih/camelcase"
|
||||
name = "github.com/evanphx/json-patch"
|
||||
packages = ["."]
|
||||
revision = "f6a740d52f961c60348ebb109adde9f4635d7540"
|
||||
revision = "944e07253867aacae43c04b2e6a239005443f33a"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/ghodss/yaml"
|
||||
@@ -103,57 +119,12 @@
|
||||
revision = "20b96f641a5ea98f2f8619ff4f3e061cff4833bd"
|
||||
version = "v1.28.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/analysis"
|
||||
packages = ["."]
|
||||
revision = "8ed83f2ea9f00f945516462951a288eaa68bf0d6"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/errors"
|
||||
packages = ["."]
|
||||
revision = "03cfca65330da08a5a440053faf994a3c682b5bf"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/jsonpointer"
|
||||
packages = ["."]
|
||||
revision = "779f45308c19820f1a69e9a4cd965f496e0da10f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/jsonreference"
|
||||
packages = ["."]
|
||||
revision = "36d33bfe519efae5632669801b180bf1a245da3b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/loads"
|
||||
packages = ["."]
|
||||
revision = "a80dea3052f00e5f032e860dd7355cd0cc67e24d"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/spec"
|
||||
packages = ["."]
|
||||
revision = "3faa0055dbbf2110abc1f3b4e3adbb22721e96e7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/strfmt"
|
||||
packages = ["."]
|
||||
revision = "93a31ef21ac23f317792fff78f9539219dd74619"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/go-openapi/swag"
|
||||
packages = ["."]
|
||||
revision = "f3f9494671f93fcff853e3c6e9e948b3eb71e590"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gogo/protobuf"
|
||||
packages = ["proto","sortkeys"]
|
||||
packages = [
|
||||
"proto",
|
||||
"sortkeys"
|
||||
]
|
||||
revision = "100ba4e885062801d56799d78530b73b178a78f3"
|
||||
version = "v0.4"
|
||||
|
||||
@@ -166,9 +137,22 @@
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto","protoc-gen-go/descriptor","ptypes/any"]
|
||||
packages = [
|
||||
"proto",
|
||||
"protoc-gen-go/descriptor",
|
||||
"ptypes",
|
||||
"ptypes/any",
|
||||
"ptypes/duration",
|
||||
"ptypes/timestamp"
|
||||
]
|
||||
revision = "ab9f9a6dab164b7d1246e0e688b0ab7b94d8553e"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/google/btree"
|
||||
packages = ["."]
|
||||
revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/google/gofuzz"
|
||||
@@ -181,17 +165,51 @@
|
||||
packages = ["."]
|
||||
revision = "84ed26760e7f6f80887a2fbfb50db3cc415d2cea"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/googleapis/gnostic"
|
||||
packages = [
|
||||
"OpenAPIv2",
|
||||
"compiler",
|
||||
"extensions"
|
||||
]
|
||||
revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/gregjones/httpcache"
|
||||
packages = [
|
||||
".",
|
||||
"diskcache"
|
||||
]
|
||||
revision = "9cad4c3443a7200dd6400aef47183728de563a38"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/hashicorp/go-hclog"
|
||||
packages = ["."]
|
||||
revision = "ca137eb4b4389c9bc6f1a6d887f056bf16c00510"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/hashicorp/go-plugin"
|
||||
packages = ["."]
|
||||
revision = "e2fbc6864d18d3c37b6cde4297ec9fca266d28f1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/hashicorp/golang-lru"
|
||||
packages = [".","simplelru"]
|
||||
packages = [
|
||||
".",
|
||||
"simplelru"
|
||||
]
|
||||
revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/howeyc/gopass"
|
||||
name = "github.com/hashicorp/yamux"
|
||||
packages = ["."]
|
||||
revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"
|
||||
revision = "f5742cb6b85602e7fa834e9d5d91a7d7fa850824"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/imdario/mergo"
|
||||
@@ -208,26 +226,48 @@
|
||||
[[projects]]
|
||||
name = "github.com/jmespath/go-jmespath"
|
||||
packages = ["."]
|
||||
revision = "3433f3ea46d9f8019119e7dd41274e112a2359a9"
|
||||
version = "0.2.2"
|
||||
revision = "0b12d6b5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/juju/ratelimit"
|
||||
name = "github.com/json-iterator/go"
|
||||
packages = ["."]
|
||||
revision = "5b9ff866471762aa2ab2dced63c9fb6f53921342"
|
||||
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mailru/easyjson"
|
||||
packages = ["buffer","jlexer","jwriter"]
|
||||
revision = "2f5df55504ebc322e4d52d34df6a1f5b503bf26d"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mitchellh/mapstructure"
|
||||
name = "github.com/mitchellh/go-testing-interface"
|
||||
packages = ["."]
|
||||
revision = "d0303fe809921458f417bcf828397a65db30a7e4"
|
||||
revision = "a61a99592b77c9ba629d254a693acffaeb4b7e28"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/modern-go/concurrent"
|
||||
packages = ["."]
|
||||
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
|
||||
version = "1.0.3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/modern-go/reflect2"
|
||||
packages = ["."]
|
||||
revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f"
|
||||
version = "1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/petar/GoLLRB"
|
||||
packages = ["llrb"]
|
||||
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/peterbourgon/diskv"
|
||||
packages = ["."]
|
||||
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
|
||||
version = "v2.0.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pkg/errors"
|
||||
@@ -241,75 +281,115 @@
|
||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = [
|
||||
"prometheus",
|
||||
"prometheus/promhttp"
|
||||
]
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = [
|
||||
"expfmt",
|
||||
"internal/bitbucket.org/ww/goautoneg",
|
||||
"model"
|
||||
]
|
||||
revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfs",
|
||||
"xfs"
|
||||
]
|
||||
revision = "94663424ae5ae9856b40a9f170762b4197024661"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/robfig/cron"
|
||||
packages = ["."]
|
||||
revision = "df38d32658d8788cd446ba74db4bb5375c4b0cb3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/russross/blackfriday"
|
||||
packages = ["."]
|
||||
revision = "93622da34e54fb6529bfb7c57e710f37a8d9cbd8"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/satori/go.uuid"
|
||||
packages = ["."]
|
||||
revision = "879c5887cd475cd7864858769793b2ceb0d44feb"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/satori/uuid"
|
||||
packages = ["."]
|
||||
revision = "879c5887cd475cd7864858769793b2ceb0d44feb"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/shurcooL/sanitized_anchor_name"
|
||||
packages = ["."]
|
||||
revision = "541ff5ee47f1dddf6a5281af78307d921524bcb5"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/sirupsen/logrus"
|
||||
packages = [".","hooks/test"]
|
||||
packages = ["."]
|
||||
revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
|
||||
version = "v1.0.3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/spf13/afero"
|
||||
packages = [".","mem"]
|
||||
packages = [
|
||||
".",
|
||||
"mem"
|
||||
]
|
||||
revision = "9be650865eab0c12963d8753212f4f9c66cdcf12"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/spf13/cobra"
|
||||
packages = [".","doc"]
|
||||
revision = "cb731b898346822cc0c225c28550a8a29d93c732"
|
||||
packages = ["."]
|
||||
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
|
||||
version = "v0.0.3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
|
||||
version = "v1.0.0"
|
||||
revision = "9a97c102cda95a86cec2345a6f09f55a939babf5"
|
||||
version = "v1.0.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/stretchr/objx"
|
||||
packages = ["."]
|
||||
revision = "1a9d0bb9f541897e62256577b352fdbc1fb4fd94"
|
||||
revision = "477a77ecc69700c7cdeb1fa9e129548e1c1c393c"
|
||||
version = "v0.1.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = ["assert","mock","require"]
|
||||
revision = "890a5c3458b43e6104ff5da8dfa139d013d77544"
|
||||
packages = [
|
||||
"assert",
|
||||
"mock",
|
||||
"require"
|
||||
]
|
||||
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
|
||||
version = "v1.2.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/ugorji/go"
|
||||
packages = ["codec"]
|
||||
revision = "5efa3251c7f7d05e5d9704a69a984ec9f1386a40"
|
||||
name = "go.opencensus.io"
|
||||
packages = [
|
||||
".",
|
||||
"internal",
|
||||
"internal/tagencoding",
|
||||
"plugin/ochttp",
|
||||
"plugin/ochttp/propagation/b3",
|
||||
"stats",
|
||||
"stats/internal",
|
||||
"stats/view",
|
||||
"tag",
|
||||
"trace",
|
||||
"trace/internal",
|
||||
"trace/propagation",
|
||||
"trace/tracestate"
|
||||
]
|
||||
revision = "79993219becaa7e29e3b60cb67f5b8e82dee11d6"
|
||||
version = "v0.17.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -320,48 +400,133 @@
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
|
||||
packages = [
|
||||
"context",
|
||||
"context/ctxhttp",
|
||||
"http2",
|
||||
"http2/hpack",
|
||||
"idna",
|
||||
"internal/timeseries",
|
||||
"lex/httplex",
|
||||
"trace"
|
||||
]
|
||||
revision = "1c05540f6879653db88113bc4a2b70aec4bd491f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
packages = [".","google","internal","jws","jwt"]
|
||||
revision = "9a379c6b3e95a790ffc43293c2a78dee0d7b6e20"
|
||||
packages = [
|
||||
".",
|
||||
"google",
|
||||
"internal",
|
||||
"jws",
|
||||
"jwt"
|
||||
]
|
||||
revision = "9dcd33a902f40452422c2367fefcb95b54f9f8f8"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["unix","windows"]
|
||||
packages = [
|
||||
"unix",
|
||||
"windows"
|
||||
]
|
||||
revision = "43e60d72a8e2bd92ee98319ba9a384a0e9837c08"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/text"
|
||||
packages = ["internal/gen","internal/triegen","internal/ucd","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable","width"]
|
||||
packages = [
|
||||
"encoding",
|
||||
"encoding/internal",
|
||||
"encoding/internal/identifier",
|
||||
"encoding/unicode",
|
||||
"internal/gen",
|
||||
"internal/triegen",
|
||||
"internal/ucd",
|
||||
"internal/utf8internal",
|
||||
"runes",
|
||||
"secure/bidirule",
|
||||
"transform",
|
||||
"unicode/bidi",
|
||||
"unicode/cldr",
|
||||
"unicode/norm",
|
||||
"unicode/rangetable"
|
||||
]
|
||||
revision = "e56139fd9c5bc7244c76116c68e500765bb6db6b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/time"
|
||||
packages = ["rate"]
|
||||
revision = "26559e0f760e39c24d730d3224364aef164ee23f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/api"
|
||||
packages = ["compute/v0.beta","gensupport","googleapi","googleapi/internal/uritemplates","googleapi/transport","internal","iterator","option","storage/v1","transport/http"]
|
||||
revision = "ed10e890a8366167a7ce33fac2b12447987bcb1c"
|
||||
packages = [
|
||||
"compute/v1",
|
||||
"gensupport",
|
||||
"googleapi",
|
||||
"googleapi/internal/uritemplates",
|
||||
"googleapi/transport",
|
||||
"internal",
|
||||
"iterator",
|
||||
"option",
|
||||
"storage/v1",
|
||||
"transport/http",
|
||||
"transport/http/internal/propagation"
|
||||
]
|
||||
revision = "3f6e8463aa1d824abe11b439d178c02220079da5"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/appengine"
|
||||
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
|
||||
packages = [
|
||||
".",
|
||||
"internal",
|
||||
"internal/app_identity",
|
||||
"internal/base",
|
||||
"internal/datastore",
|
||||
"internal/log",
|
||||
"internal/modules",
|
||||
"internal/remote_api",
|
||||
"internal/urlfetch",
|
||||
"urlfetch"
|
||||
]
|
||||
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/genproto"
|
||||
packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status"]
|
||||
packages = [
|
||||
"googleapis/api/annotations",
|
||||
"googleapis/iam/v1",
|
||||
"googleapis/rpc/status"
|
||||
]
|
||||
revision = "ee236bd376b077c7a89f260c026c4735b195e459"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/grpc"
|
||||
packages = [".","codes","connectivity","credentials","grpclb/grpc_lb_v1","grpclog","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"]
|
||||
packages = [
|
||||
".",
|
||||
"codes",
|
||||
"connectivity",
|
||||
"credentials",
|
||||
"grpclb/grpc_lb_v1",
|
||||
"grpclog",
|
||||
"health",
|
||||
"health/grpc_health_v1",
|
||||
"internal",
|
||||
"keepalive",
|
||||
"metadata",
|
||||
"naming",
|
||||
"peer",
|
||||
"stats",
|
||||
"status",
|
||||
"tap",
|
||||
"transport"
|
||||
]
|
||||
revision = "b3ddf786825de56a4178401b7e174ee332173b66"
|
||||
version = "v1.5.2"
|
||||
|
||||
@@ -371,12 +536,6 @@
|
||||
revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
|
||||
version = "v0.9.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
name = "gopkg.in/mgo.v2"
|
||||
packages = ["bson","internal/json"]
|
||||
revision = "3f83fa5005286a7fe593b055f0d7771a7dce4655"
|
||||
|
||||
[[projects]]
|
||||
branch = "v2"
|
||||
name = "gopkg.in/yaml.v2"
|
||||
@@ -384,31 +543,270 @@
|
||||
revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
|
||||
|
||||
[[projects]]
|
||||
name = "k8s.io/apimachinery"
|
||||
packages = ["pkg/api/equality","pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/apimachinery","pkg/apimachinery/announced","pkg/apimachinery/registered","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1alpha1","pkg/conversion","pkg/conversion/queryparams","pkg/conversion/unstructured","pkg/fields","pkg/labels","pkg/openapi","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/cache","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/framer","pkg/util/httpstream","pkg/util/httpstream/spdy","pkg/util/intstr","pkg/util/json","pkg/util/net","pkg/util/rand","pkg/util/remotecommand","pkg/util/runtime","pkg/util/sets","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/netutil","third_party/forked/golang/reflect"]
|
||||
revision = "1fd2e63a9a370677308a42f24fd40c86438afddf"
|
||||
|
||||
[[projects]]
|
||||
name = "k8s.io/client-go"
|
||||
packages = ["discovery","discovery/fake","dynamic","kubernetes","kubernetes/scheme","kubernetes/typed/admissionregistration/v1alpha1","kubernetes/typed/apps/v1beta1","kubernetes/typed/authentication/v1","kubernetes/typed/authentication/v1beta1","kubernetes/typed/authorization/v1","kubernetes/typed/authorization/v1beta1","kubernetes/typed/autoscaling/v1","kubernetes/typed/autoscaling/v2alpha1","kubernetes/typed/batch/v1","kubernetes/typed/batch/v2alpha1","kubernetes/typed/certificates/v1beta1","kubernetes/typed/core/v1","kubernetes/typed/extensions/v1beta1","kubernetes/typed/networking/v1","kubernetes/typed/policy/v1beta1","kubernetes/typed/rbac/v1alpha1","kubernetes/typed/rbac/v1beta1","kubernetes/typed/settings/v1alpha1","kubernetes/typed/storage/v1","kubernetes/typed/storage/v1beta1","pkg/api","pkg/api/v1","pkg/api/v1/ref","pkg/apis/admissionregistration","pkg/apis/admissionregistration/v1alpha1","pkg/apis/apps","pkg/apis/apps/v1beta1","pkg/apis/authentication","pkg/apis/authentication/v1","pkg/apis/authentication/v1beta1","pkg/apis/authorization","pkg/apis/authorization/v1","pkg/apis/authorization/v1beta1","pkg/apis/autoscaling","pkg/apis/autoscaling/v1","pkg/apis/autoscaling/v2alpha1","pkg/apis/batch","pkg/apis/batch/v1","pkg/apis/batch/v2alpha1","pkg/apis/certificates","pkg/apis/certificates/v1beta1","pkg/apis/extensions","pkg/apis/extensions/v1beta1","pkg/apis/networking","pkg/apis/networking/v1","pkg/apis/policy","pkg/apis/policy/v1beta1","pkg/apis/rbac","pkg/apis/rbac/v1alpha1","pkg/apis/rbac/v1beta1","pkg/apis/settings","pkg/apis/settings/v1alpha1","pkg/apis/storage","pkg/apis/storage/v1","pkg/apis/storage/v1beta1","pkg/util","pkg/util/parsers","pkg/version","plugin/pkg/client/auth/azure","plugin/pkg/client/auth/gcp","plugin/pkg/client/auth/oidc","rest","rest/watch","testing","third_party/forked/golang/template","tools/auth","tools/cache","tools/clientcmd","tools/clientcmd/api","tools/clientcmd/api/latest","tools/clientcmd/api/v1","tools/metrics","tools/remotecommand","transport","util/cert","util/exec","util/flowcontrol","util/homedir","util/integer","util/jsonpath","util/workqueue"]
|
||||
revision = "d92e8497f71b7b4e0494e5bd204b48d34bd6f254"
|
||||
version = "v4.0.0"
|
||||
name = "k8s.io/api"
|
||||
packages = [
|
||||
"admission/v1beta1",
|
||||
"admissionregistration/v1alpha1",
|
||||
"admissionregistration/v1beta1",
|
||||
"apps/v1",
|
||||
"apps/v1beta1",
|
||||
"apps/v1beta2",
|
||||
"authentication/v1",
|
||||
"authentication/v1beta1",
|
||||
"authorization/v1",
|
||||
"authorization/v1beta1",
|
||||
"autoscaling/v1",
|
||||
"autoscaling/v2beta1",
|
||||
"batch/v1",
|
||||
"batch/v1beta1",
|
||||
"batch/v2alpha1",
|
||||
"certificates/v1beta1",
|
||||
"core/v1",
|
||||
"events/v1beta1",
|
||||
"extensions/v1beta1",
|
||||
"imagepolicy/v1alpha1",
|
||||
"networking/v1",
|
||||
"policy/v1beta1",
|
||||
"rbac/v1",
|
||||
"rbac/v1alpha1",
|
||||
"rbac/v1beta1",
|
||||
"scheduling/v1alpha1",
|
||||
"scheduling/v1beta1",
|
||||
"settings/v1alpha1",
|
||||
"storage/v1",
|
||||
"storage/v1alpha1",
|
||||
"storage/v1beta1"
|
||||
]
|
||||
revision = "072894a440bdee3a891dea811fe42902311cd2a3"
|
||||
version = "kubernetes-1.11.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/gengo"
|
||||
packages = ["args","generator","namer","parser","types"]
|
||||
revision = "2ef5ef33e269934e14149598f5a85d1f561a7219"
|
||||
name = "k8s.io/apiextensions-apiserver"
|
||||
packages = [
|
||||
"pkg/apis/apiextensions",
|
||||
"pkg/apis/apiextensions/v1beta1"
|
||||
]
|
||||
revision = "07bbbb7a28a34c56bf9d1b192a88cc9b2350095e"
|
||||
|
||||
[[projects]]
|
||||
branch = "release-1.11"
|
||||
name = "k8s.io/apimachinery"
|
||||
packages = [
|
||||
"pkg/api/equality",
|
||||
"pkg/api/errors",
|
||||
"pkg/api/meta",
|
||||
"pkg/api/resource",
|
||||
"pkg/apis/meta/internalversion",
|
||||
"pkg/apis/meta/v1",
|
||||
"pkg/apis/meta/v1/unstructured",
|
||||
"pkg/apis/meta/v1/unstructured/unstructuredscheme",
|
||||
"pkg/apis/meta/v1beta1",
|
||||
"pkg/conversion",
|
||||
"pkg/conversion/queryparams",
|
||||
"pkg/fields",
|
||||
"pkg/labels",
|
||||
"pkg/runtime",
|
||||
"pkg/runtime/schema",
|
||||
"pkg/runtime/serializer",
|
||||
"pkg/runtime/serializer/json",
|
||||
"pkg/runtime/serializer/protobuf",
|
||||
"pkg/runtime/serializer/recognizer",
|
||||
"pkg/runtime/serializer/streaming",
|
||||
"pkg/runtime/serializer/versioning",
|
||||
"pkg/selection",
|
||||
"pkg/types",
|
||||
"pkg/util/cache",
|
||||
"pkg/util/clock",
|
||||
"pkg/util/diff",
|
||||
"pkg/util/duration",
|
||||
"pkg/util/errors",
|
||||
"pkg/util/framer",
|
||||
"pkg/util/httpstream",
|
||||
"pkg/util/httpstream/spdy",
|
||||
"pkg/util/intstr",
|
||||
"pkg/util/json",
|
||||
"pkg/util/mergepatch",
|
||||
"pkg/util/net",
|
||||
"pkg/util/remotecommand",
|
||||
"pkg/util/runtime",
|
||||
"pkg/util/sets",
|
||||
"pkg/util/strategicpatch",
|
||||
"pkg/util/validation",
|
||||
"pkg/util/validation/field",
|
||||
"pkg/util/wait",
|
||||
"pkg/util/yaml",
|
||||
"pkg/version",
|
||||
"pkg/watch",
|
||||
"third_party/forked/golang/json",
|
||||
"third_party/forked/golang/netutil",
|
||||
"third_party/forked/golang/reflect"
|
||||
]
|
||||
revision = "103fd098999dc9c0c88536f5c9ad2e5da39373ae"
|
||||
|
||||
[[projects]]
|
||||
name = "k8s.io/client-go"
|
||||
packages = [
|
||||
"discovery",
|
||||
"discovery/fake",
|
||||
"dynamic",
|
||||
"informers",
|
||||
"informers/admissionregistration",
|
||||
"informers/admissionregistration/v1alpha1",
|
||||
"informers/admissionregistration/v1beta1",
|
||||
"informers/apps",
|
||||
"informers/apps/v1",
|
||||
"informers/apps/v1beta1",
|
||||
"informers/apps/v1beta2",
|
||||
"informers/autoscaling",
|
||||
"informers/autoscaling/v1",
|
||||
"informers/autoscaling/v2beta1",
|
||||
"informers/batch",
|
||||
"informers/batch/v1",
|
||||
"informers/batch/v1beta1",
|
||||
"informers/batch/v2alpha1",
|
||||
"informers/certificates",
|
||||
"informers/certificates/v1beta1",
|
||||
"informers/core",
|
||||
"informers/core/v1",
|
||||
"informers/events",
|
||||
"informers/events/v1beta1",
|
||||
"informers/extensions",
|
||||
"informers/extensions/v1beta1",
|
||||
"informers/internalinterfaces",
|
||||
"informers/networking",
|
||||
"informers/networking/v1",
|
||||
"informers/policy",
|
||||
"informers/policy/v1beta1",
|
||||
"informers/rbac",
|
||||
"informers/rbac/v1",
|
||||
"informers/rbac/v1alpha1",
|
||||
"informers/rbac/v1beta1",
|
||||
"informers/scheduling",
|
||||
"informers/scheduling/v1alpha1",
|
||||
"informers/scheduling/v1beta1",
|
||||
"informers/settings",
|
||||
"informers/settings/v1alpha1",
|
||||
"informers/storage",
|
||||
"informers/storage/v1",
|
||||
"informers/storage/v1alpha1",
|
||||
"informers/storage/v1beta1",
|
||||
"kubernetes",
|
||||
"kubernetes/scheme",
|
||||
"kubernetes/typed/admissionregistration/v1alpha1",
|
||||
"kubernetes/typed/admissionregistration/v1beta1",
|
||||
"kubernetes/typed/apps/v1",
|
||||
"kubernetes/typed/apps/v1beta1",
|
||||
"kubernetes/typed/apps/v1beta2",
|
||||
"kubernetes/typed/authentication/v1",
|
||||
"kubernetes/typed/authentication/v1beta1",
|
||||
"kubernetes/typed/authorization/v1",
|
||||
"kubernetes/typed/authorization/v1beta1",
|
||||
"kubernetes/typed/autoscaling/v1",
|
||||
"kubernetes/typed/autoscaling/v2beta1",
|
||||
"kubernetes/typed/batch/v1",
|
||||
"kubernetes/typed/batch/v1beta1",
|
||||
"kubernetes/typed/batch/v2alpha1",
|
||||
"kubernetes/typed/certificates/v1beta1",
|
||||
"kubernetes/typed/core/v1",
|
||||
"kubernetes/typed/events/v1beta1",
|
||||
"kubernetes/typed/extensions/v1beta1",
|
||||
"kubernetes/typed/networking/v1",
|
||||
"kubernetes/typed/policy/v1beta1",
|
||||
"kubernetes/typed/rbac/v1",
|
||||
"kubernetes/typed/rbac/v1alpha1",
|
||||
"kubernetes/typed/rbac/v1beta1",
|
||||
"kubernetes/typed/scheduling/v1alpha1",
|
||||
"kubernetes/typed/scheduling/v1beta1",
|
||||
"kubernetes/typed/settings/v1alpha1",
|
||||
"kubernetes/typed/storage/v1",
|
||||
"kubernetes/typed/storage/v1alpha1",
|
||||
"kubernetes/typed/storage/v1beta1",
|
||||
"listers/admissionregistration/v1alpha1",
|
||||
"listers/admissionregistration/v1beta1",
|
||||
"listers/apps/v1",
|
||||
"listers/apps/v1beta1",
|
||||
"listers/apps/v1beta2",
|
||||
"listers/autoscaling/v1",
|
||||
"listers/autoscaling/v2beta1",
|
||||
"listers/batch/v1",
|
||||
"listers/batch/v1beta1",
|
||||
"listers/batch/v2alpha1",
|
||||
"listers/certificates/v1beta1",
|
||||
"listers/core/v1",
|
||||
"listers/events/v1beta1",
|
||||
"listers/extensions/v1beta1",
|
||||
"listers/networking/v1",
|
||||
"listers/policy/v1beta1",
|
||||
"listers/rbac/v1",
|
||||
"listers/rbac/v1alpha1",
|
||||
"listers/rbac/v1beta1",
|
||||
"listers/scheduling/v1alpha1",
|
||||
"listers/scheduling/v1beta1",
|
||||
"listers/settings/v1alpha1",
|
||||
"listers/storage/v1",
|
||||
"listers/storage/v1alpha1",
|
||||
"listers/storage/v1beta1",
|
||||
"pkg/apis/clientauthentication",
|
||||
"pkg/apis/clientauthentication/v1alpha1",
|
||||
"pkg/apis/clientauthentication/v1beta1",
|
||||
"pkg/version",
|
||||
"plugin/pkg/client/auth/azure",
|
||||
"plugin/pkg/client/auth/exec",
|
||||
"plugin/pkg/client/auth/gcp",
|
||||
"plugin/pkg/client/auth/oidc",
|
||||
"rest",
|
||||
"rest/watch",
|
||||
"restmapper",
|
||||
"testing",
|
||||
"third_party/forked/golang/template",
|
||||
"tools/auth",
|
||||
"tools/cache",
|
||||
"tools/clientcmd",
|
||||
"tools/clientcmd/api",
|
||||
"tools/clientcmd/api/latest",
|
||||
"tools/clientcmd/api/v1",
|
||||
"tools/metrics",
|
||||
"tools/pager",
|
||||
"tools/reference",
|
||||
"tools/remotecommand",
|
||||
"transport",
|
||||
"transport/spdy",
|
||||
"util/buffer",
|
||||
"util/cert",
|
||||
"util/connrotation",
|
||||
"util/exec",
|
||||
"util/flowcontrol",
|
||||
"util/homedir",
|
||||
"util/integer",
|
||||
"util/jsonpath",
|
||||
"util/retry",
|
||||
"util/workqueue"
|
||||
]
|
||||
revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
|
||||
version = "v8.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/kube-openapi"
|
||||
packages = ["pkg/util/proto"]
|
||||
revision = "d83b052f768a50a309c692a9c271da3f3276ff88"
|
||||
|
||||
[[projects]]
|
||||
name = "k8s.io/kubernetes"
|
||||
packages = ["cmd/libs/go2idl/client-gen","cmd/libs/go2idl/client-gen/args","cmd/libs/go2idl/client-gen/generators","cmd/libs/go2idl/client-gen/generators/fake","cmd/libs/go2idl/client-gen/generators/scheme","cmd/libs/go2idl/client-gen/path","cmd/libs/go2idl/client-gen/types","cmd/libs/go2idl/informer-gen","cmd/libs/go2idl/informer-gen/generators","cmd/libs/go2idl/lister-gen","cmd/libs/go2idl/lister-gen/generators","pkg/printers","pkg/util/slice"]
|
||||
revision = "793658f2d7ca7f064d2bdf606519f9fe1229c381"
|
||||
version = "v1.7.4"
|
||||
packages = [
|
||||
"pkg/kubectl/genericclioptions",
|
||||
"pkg/kubectl/genericclioptions/printers",
|
||||
"pkg/kubectl/genericclioptions/resource",
|
||||
"pkg/kubectl/scheme",
|
||||
"pkg/printers"
|
||||
]
|
||||
revision = "91e7b4fd31fcd3d5f436da26c980becec37ceefe"
|
||||
version = "v1.11.0"
|
||||
|
||||
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "37edb445765bd183e89ff47d8a7822a132c3752a8b528e34f499ad4858f792a8"
inputs-digest = "7979aebee2c67e7fa68bddf050ef32b75a2f51145d26d00a54f6bf489af635a2"
solver-name = "gps-cdcl"
solver-version = 1
118  Gopkg.toml
@@ -20,81 +20,103 @@
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
|
||||
required = [
|
||||
"k8s.io/kubernetes/cmd/libs/go2idl/client-gen",
|
||||
"k8s.io/kubernetes/cmd/libs/go2idl/lister-gen",
|
||||
"k8s.io/kubernetes/cmd/libs/go2idl/informer-gen"
|
||||
]
|
||||
[prune]
|
||||
unused-packages = true
|
||||
non-go = true
|
||||
go-tests = true
|
||||
|
||||
#
|
||||
# Kubernetes packages
|
||||
#
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/kubernetes"
|
||||
version = "~1.11"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/client-go"
|
||||
version = "~8.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/apimachinery"
|
||||
version = "kubernetes-1.11.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/api"
|
||||
version = "kubernetes-1.11.0"
|
||||
|
||||
# vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go:104:16:
|
||||
# unknown field 'CaseSensitive' in struct literal of type jsoniter.Config
|
||||
[[override]]
|
||||
name = "github.com/json-iterator/go"
|
||||
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
|
||||
|
||||
# vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go:300:25:
|
||||
# cannot call non-function spt.Token (type adal.Token)
|
||||
[[override]]
|
||||
name = "github.com/Azure/go-autorest"
|
||||
revision = "1ff28809256a84bb6966640ff3d0371af82ccba4"
|
||||
|
||||
#
|
||||
# Cloud provider packages
|
||||
#
|
||||
[[constraint]]
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
version = "1.13.12"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/Azure/azure-sdk-for-go"
|
||||
version = "~11.3.0-beta"
|
||||
|
||||
[[constraint]]
|
||||
name = "cloud.google.com/go"
|
||||
version = "0.11.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/Azure/azure-sdk-for-go"
|
||||
version = "~10.2.1-beta"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/Azure/go-autorest"
|
||||
version = "~8.1.x"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
version = "1.10.26"
|
||||
|
||||
[[constraint]]
|
||||
name = "google.golang.org/api"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "golang.org/x/oauth2"
|
||||
branch = "master"
|
||||
|
||||
#
|
||||
# Third party packages
|
||||
#
|
||||
[[constraint]]
|
||||
name = "github.com/golang/glog"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/robfig/cron"
|
||||
revision = "df38d32658d8788cd446ba74db4bb5375c4b0cb3"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/satori/go.uuid"
|
||||
version = "1.1.0"
|
||||
|
||||
# TODO(1.0) this repo is a redirect to github.com/satori/go.uuid. Our
|
||||
# current version of azure-sdk-for-go references this redirect, so
|
||||
# use it so we don't get a duplicate copy of this dependency.
|
||||
# Once our azure-sdk-for-go is updated to a newer version (where
|
||||
# their dependency has changed to .../go.uuid), switch this to
|
||||
# github.com/satori/go.uuid
|
||||
[[constraint]]
|
||||
name = "github.com/satori/uuid"
|
||||
version = "1.1.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/spf13/afero"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/spf13/cobra"
|
||||
version = "0.0.3"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/spf13/pflag"
|
||||
version = "1.0.0"
|
||||
version = "1.0.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/stretchr/testify"
|
||||
version = "~1.2.2"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/hashicorp/go-plugin"
|
||||
branch = "master"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/api"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/apimachinery"
|
||||
revision = "1fd2e63a9a370677308a42f24fd40c86438afddf"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/client-go"
|
||||
version = "~4.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "k8s.io/kubernetes"
|
||||
version = "~1.7"
|
||||
|
||||
[[override]]
|
||||
name = "github.com/russross/blackfriday"
|
||||
revision = "93622da34e54fb6529bfb7c57e710f37a8d9cbd8"
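
The constraints and overrides above are consumed by `dep` (the lock file's `[solve-meta]` block names the `dep` analyzer). A rough sketch of re-vendoring after editing them, assuming `dep` is installed locally:

```
# re-resolve constraints and update Gopkg.lock and vendor/
dep ensure -v

# show which projects are pinned to which versions/revisions
dep status
```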
141  Makefile
@@ -15,7 +15,7 @@
|
||||
# limitations under the License.
|
||||
|
||||
# The binary to build (just the basename).
|
||||
BIN := ark
|
||||
BIN ?= ark
|
||||
|
||||
# This repo's root import path (under GOPATH).
|
||||
PKG := github.com/heptio/ark
|
||||
@@ -24,16 +24,19 @@ PKG := github.com/heptio/ark
|
||||
REGISTRY ?= gcr.io/heptio-images
|
||||
|
||||
# Which architecture to build - see $(ALL_ARCH) for options.
|
||||
# if the 'local' rule is being run, detect the ARCH from 'go env'
|
||||
# if it wasn't specified by the caller.
|
||||
local : ARCH ?= $(shell go env GOOS)-$(shell go env GOARCH)
|
||||
ARCH ?= linux-amd64
|
||||
|
||||
VERSION ?= master
|
||||
|
||||
TAG_LATEST ?= false
|
||||
|
||||
###
|
||||
### These variables should not need tweaking.
|
||||
###
|
||||
|
||||
SRC_DIRS := cmd pkg # directories which hold app source (not vendored)
|
||||
|
||||
CLI_PLATFORMS := linux-amd64 linux-arm linux-arm64 darwin-amd64 windows-amd64
|
||||
CONTAINER_PLATFORMS := linux-amd64 linux-arm linux-arm64
|
||||
|
||||
@@ -44,7 +47,7 @@ GOARCH = $(word 2, $(platform_temp))
|
||||
# TODO(ncdc): support multiple image architectures once gcr.io supports manifest lists
|
||||
# Set default base image dynamically for each arch
|
||||
ifeq ($(GOARCH),amd64)
|
||||
DOCKERFILE ?= Dockerfile.alpine
|
||||
DOCKERFILE ?= Dockerfile-$(BIN).alpine
|
||||
endif
|
||||
#ifeq ($(GOARCH),arm)
|
||||
# DOCKERFILE ?= Dockerfile.arm #armel/busybox
|
||||
@@ -53,14 +56,14 @@ endif
|
||||
# DOCKERFILE ?= Dockerfile.arm64 #aarch64/busybox
|
||||
#endif
|
||||
|
||||
IMAGE := $(REGISTRY)/$(BIN)
|
||||
|
||||
BUILD_IMAGE ?= gcr.io/heptio-images/golang:1.9-alpine3.6
|
||||
IMAGE = $(REGISTRY)/$(BIN)
|
||||
|
||||
# If you want to build all binaries, see the 'all-build' rule.
|
||||
# If you want to build all containers, see the 'all-container' rule.
|
||||
# If you want to build AND push all containers, see the 'all-push' rule.
|
||||
all: build
|
||||
all:
|
||||
@$(MAKE) build
|
||||
@$(MAKE) build BIN=ark-restic-restore-helper
|
||||
|
||||
build-%:
|
||||
@$(MAKE) --no-print-directory ARCH=$* build
|
||||
@@ -77,53 +80,96 @@ all-build: $(addprefix build-, $(CLI_PLATFORMS))
|
||||
|
||||
#all-push: $(addprefix push-, $(CONTAINER_PLATFORMS))
|
||||
|
||||
local: build-dirs
|
||||
GOOS=$(GOOS) \
|
||||
GOARCH=$(GOARCH) \
|
||||
VERSION=$(VERSION) \
|
||||
PKG=$(PKG) \
|
||||
BIN=$(BIN) \
|
||||
OUTPUT_DIR=$$(pwd)/_output/bin/$(GOOS)/$(GOARCH) \
|
||||
./hack/build.sh
|
||||
|
||||
build: _output/bin/$(GOOS)/$(GOARCH)/$(BIN)
|
||||
|
||||
_output/bin/$(GOOS)/$(GOARCH)/$(BIN): build-dirs
|
||||
@echo "building: $@"
|
||||
@$(MAKE) shell CMD="-c '\
|
||||
$(MAKE) shell CMD="-c '\
|
||||
GOOS=$(GOOS) \
|
||||
GOARCH=$(GOARCH) \
|
||||
VERSION=$(VERSION) \
|
||||
PKG=$(PKG) \
|
||||
BIN=$(BIN) \
|
||||
OUTPUT_DIR=/output/$(GOOS)/$(GOARCH) \
|
||||
./hack/build.sh'"
|
||||
|
||||
TTY := $(shell tty -s && echo "-t")
|
||||
|
||||
BUILDER_IMAGE := ark-builder
|
||||
|
||||
# Example: make shell CMD="date > datefile"
|
||||
shell: build-dirs
|
||||
shell: build-dirs build-image
|
||||
@# the volume bind-mount of $PWD/vendor/k8s.io/api is needed for code-gen to
|
||||
@# function correctly (ref. https://github.com/kubernetes/kubernetes/pull/64567)
|
||||
@docker run \
|
||||
-e GOFLAGS \
|
||||
-i $(TTY) \
|
||||
--rm \
|
||||
-u $$(id -u):$$(id -g) \
|
||||
-v "$$(pwd)/.go/pkg:/go/pkg" \
|
||||
-v "$$(pwd)/.go/src:/go/src" \
|
||||
-v "$$(pwd)/.go/std:/go/std" \
|
||||
-v "$$(pwd):/go/src/$(PKG)" \
|
||||
-v "$$(pwd)/_output/bin:/output" \
|
||||
-v "$$(pwd)/.go/std/$(GOOS)/$(GOARCH):/usr/local/go/pkg/$(GOOS)_$(GOARCH)_static" \
|
||||
-v "$$(pwd)/vendor/k8s.io/api:/go/src/k8s.io/api:delegated" \
|
||||
-v "$$(pwd)/.go/pkg:/go/pkg:delegated" \
|
||||
-v "$$(pwd)/.go/std:/go/std:delegated" \
|
||||
-v "$$(pwd):/go/src/$(PKG):delegated" \
|
||||
-v "$$(pwd)/_output/bin:/output:delegated" \
|
||||
-v "$$(pwd)/.go/std/$(GOOS)/$(GOARCH):/usr/local/go/pkg/$(GOOS)_$(GOARCH)_static:delegated" \
|
||||
-v "$$(pwd)/.go/go-build:/.cache/go-build:delegated" \
|
||||
-w /go/src/$(PKG) \
|
||||
$(BUILD_IMAGE) \
|
||||
$(BUILDER_IMAGE) \
|
||||
/bin/sh $(CMD)
|
||||
|
||||
DOTFILE_IMAGE = $(subst :,_,$(subst /,_,$(IMAGE))-$(VERSION))
|
||||
|
||||
# Use slightly customized build/push targets since we don't have a Go binary to build for the fsfreeze image
|
||||
build-fsfreeze: BIN = fsfreeze-pause
|
||||
build-fsfreeze:
|
||||
@cp $(DOCKERFILE) _output/.dockerfile-$(BIN).alpine
|
||||
@docker build -t $(IMAGE):$(VERSION) -f _output/.dockerfile-$(BIN).alpine _output
|
||||
@docker images -q $(IMAGE):$(VERSION) > .container-$(DOTFILE_IMAGE)
|
||||
|
||||
push-fsfreeze: BIN = fsfreeze-pause
|
||||
push-fsfreeze:
|
||||
@docker push $(IMAGE):$(VERSION)
|
||||
ifeq ($(TAG_LATEST), true)
|
||||
docker tag $(IMAGE):$(VERSION) $(IMAGE):latest
|
||||
docker push $(IMAGE):latest
|
||||
endif
|
||||
@docker images -q $(REGISTRY)/fsfreeze-pause:$(VERSION) > .container-$(DOTFILE_IMAGE)
|
||||
|
||||
all-containers:
|
||||
$(MAKE) container
|
||||
$(MAKE) container BIN=ark-restic-restore-helper
|
||||
$(MAKE) build-fsfreeze
|
||||
|
||||
container: verify test .container-$(DOTFILE_IMAGE) container-name
|
||||
.container-$(DOTFILE_IMAGE): _output/bin/$(GOOS)/$(GOARCH)/$(BIN) $(DOCKERFILE)
|
||||
@cp $(DOCKERFILE) _output/.dockerfile-$(GOOS)-$(GOARCH)
|
||||
@docker build -t $(IMAGE):$(VERSION) -f _output/.dockerfile-$(GOOS)-$(GOARCH) _output
|
||||
@cp $(DOCKERFILE) _output/.dockerfile-$(BIN)-$(GOOS)-$(GOARCH)
|
||||
@docker build -t $(IMAGE):$(VERSION) -f _output/.dockerfile-$(BIN)-$(GOOS)-$(GOARCH) _output
|
||||
@docker images -q $(IMAGE):$(VERSION) > $@
|
||||
|
||||
container-name:
|
||||
@echo "container: $(IMAGE):$(VERSION)"
|
||||
|
||||
all-push:
|
||||
$(MAKE) push
|
||||
$(MAKE) push BIN=ark-restic-restore-helper
|
||||
$(MAKE) push-fsfreeze
|
||||
|
||||
|
||||
push: .push-$(DOTFILE_IMAGE) push-name
|
||||
.push-$(DOTFILE_IMAGE): .container-$(DOTFILE_IMAGE)
|
||||
ifeq ($(findstring gcr.io,$(REGISTRY)),gcr.io)
|
||||
@gcloud docker -- push $(IMAGE):$(VERSION)
|
||||
else
|
||||
@docker push $(IMAGE):$(VERSION)
|
||||
ifeq ($(TAG_LATEST), true)
|
||||
docker tag $(IMAGE):$(VERSION) $(IMAGE):latest
|
||||
docker push $(IMAGE):latest
|
||||
endif
|
||||
@docker images -q $(IMAGE):$(VERSION) > $@
|
||||
|
||||
@@ -133,51 +179,38 @@ push-name:
|
||||
SKIP_TESTS ?=
|
||||
test: build-dirs
|
||||
ifneq ($(SKIP_TESTS), 1)
|
||||
@$(MAKE) shell CMD="-c 'hack/test.sh $(SRC_DIRS)'"
|
||||
@$(MAKE) shell CMD="-c 'hack/test.sh $(WHAT)'"
|
||||
endif
|
||||
|
||||
fmt:
|
||||
@$(MAKE) shell CMD="-c 'hack/update-fmt.sh'"
|
||||
test-local: build-dirs
|
||||
ifneq ($(SKIP_TESTS), 1)
|
||||
hack/test.sh $(WHAT)
|
||||
endif
|
||||
|
||||
verify:
|
||||
ifneq ($(SKIP_TESTS), 1)
|
||||
@$(MAKE) shell CMD="-c 'hack/verify-all.sh'"
|
||||
endif
|
||||
|
||||
update: fmt
|
||||
update:
|
||||
@$(MAKE) shell CMD="-c 'hack/update-all.sh'"
|
||||
|
||||
release: all-tar-bin checksum
|
||||
|
||||
checksum:
|
||||
@cd _output/release; \
|
||||
sha256sum *.tar.gz > CHECKSUM; \
|
||||
cat CHECKSUM; \
|
||||
sha256sum CHECKSUM
|
||||
|
||||
all-tar-bin: $(addprefix tar-bin-, $(CLI_PLATFORMS))
|
||||
|
||||
tar-bin-%:
|
||||
@$(MAKE) ARCH=$* tar-bin
|
||||
|
||||
tar-bin: build
|
||||
mkdir -p _output/release
|
||||
|
||||
# We do the subshell & wildcard ls so we can pick up $(BIN).exe for windows
|
||||
(cd _output/bin/$(GOOS)/$(GOARCH) && ls $(BIN)*) | \
|
||||
tar \
|
||||
-C _output/bin/$(GOOS)/$(GOARCH) \
|
||||
--files-from=- \
|
||||
-zcf _output/release/$(BIN)-$(GOOS)-$(GOARCH).tar.gz
|
||||
|
||||
build-dirs:
|
||||
@mkdir -p _output/bin/$(GOOS)/$(GOARCH)
|
||||
@mkdir -p .go/src/$(PKG) .go/pkg .go/bin .go/std/$(GOOS)/$(GOARCH)
|
||||
@mkdir -p .go/src/$(PKG) .go/pkg .go/bin .go/std/$(GOOS)/$(GOARCH) .go/go-build
|
||||
|
||||
clean: container-clean bin-clean
|
||||
build-image:
|
||||
cd hack/build-image && docker build -t $(BUILDER_IMAGE) .
|
||||
|
||||
container-clean:
|
||||
clean:
|
||||
rm -rf .container-* _output/.dockerfile-* .push-*
|
||||
|
||||
bin-clean:
|
||||
rm -rf .go _output
|
||||
docker rmi $(BUILDER_IMAGE)
|
||||
|
||||
ci: all verify test
|
||||
|
||||
changelog:
|
||||
hack/changelog.sh
|
||||
|
||||
release:
|
||||
hack/goreleaser.sh
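
The reworked targets above are parameterized by `BIN`, `REGISTRY`, `VERSION`, and `TAG_LATEST`. A usage sketch (the registry and version values below are placeholders, not taken from this diff):

```
# build the ark CLI for the host OS/arch
make local

# build all container images (ark, ark-restic-restore-helper, fsfreeze-pause)
make all-containers REGISTRY=gcr.io/my-project VERSION=v0.10.0

# push the images, optionally also tagging :latest
make all-push REGISTRY=gcr.io/my-project VERSION=v0.10.0 TAG_LATEST=true
```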
205  README.md
@@ -2,211 +2,78 @@
|
||||
|
||||
**Maintainers:** [Heptio][0]
|
||||
|
||||
[![Build Status][1]][2]
|
||||
[![Build Status][1]][2] <a href="https://zenhub.com"><img src="https://raw.githubusercontent.com/ZenHubIO/support/master/zenhub-badge.png"></a>
|
||||
|
||||
## Overview
|
||||
Heptio Ark is a utility for managing disaster recovery, specifically for your [Kubernetes][14] cluster resources and persistent volumes. It provides a simple, configurable, and operationally robust way to back up and restore applications and PVs from a series of checkpoints. This allows you to better automate in the following scenarios:
|
||||
|
||||
* **Disaster recovery** with reduced TTR (time to respond), in the case of:
|
||||
* Infrastructure loss
|
||||
* Data corruption
|
||||
* Service outages
|
||||
Ark gives you tools to back up and restore your Kubernetes cluster resources and persistent volumes. Ark lets you:
|
||||
|
||||
* **Cross-cloud-provider migration** for Kubernetes API objects (cross-cloud-provider migration of persistent volume snapshots not yet supported)
|
||||
* Take backups of your cluster and restore in case of loss.
|
||||
* Copy cluster resources to other clusters.
|
||||
* Replicate your production environment for development and testing environments.
|
||||
|
||||
* **Dev and testing environment setup (+ CI)**, via replication of prod environment
|
||||
Ark consists of:
|
||||
|
||||
More concretely, Heptio Ark combines an in-cluster service with a CLI that allows you to record both:
|
||||
1. *Configurable subsets of Kubernetes API objects* -- as tarballs stored in object storage
|
||||
2. *Disk snapshots of Persistent Volumes* -- via the cloud provider APIs
|
||||
* A server that runs on your cluster
|
||||
* A command-line client that runs locally
|
||||
|
||||
Heptio Ark currently supports the [AWS][15], [GCP][16], and [Azure][17] cloud provider platforms.
|
||||
You can run Ark in clusters on a cloud provider or on-premises. For detailed information, see [Compatible Storage Providers][99].
|
||||
|
||||
## Quickstart
|
||||
## Breaking changes
|
||||
|
||||
This guide gets Ark up and running on your cluster, and goes through an example using the following:
|
||||
* **Minio, an S3-compatible storage service** that runs locally on your cluster. This is the storage service where backup files are uploaded. *Note that Ark is intended to run on a cloud provider--we are using Minio here to keep the example convenient and self-contained.*
|
||||
Ark version 0.10.0 introduces a number of breaking changes. Before you upgrade to version 0.10.0, make sure to read [the documentation on upgrading][98].
|
||||
|
||||
* **A sample nginx app** under the `nginx-example` namespace, used to demonstrate Ark's backup and restore functionality.
|
||||
## More information
|
||||
|
||||
Note that this example *does not* include a demonstration of PV disk snapshots, because that feature requires integration with a cloud provider API. For snapshotting examples and instructions specific to AWS, GCP, and Azure, see [Cloud Provider Specifics][23].
|
||||
|
||||
### 0. Prerequisites
|
||||
|
||||
* *You should have access to an up-and-running Kubernetes cluster (minimum version 1.7).* If you do not have a cluster, [choose a setup solution][9] from the official Kubernetes docs.
|
||||
|
||||
* *You will need to have a DNS server set up on your cluster for the example files to work.* You can check this with `kubectl get svc -l k8s-app=kube-dns --namespace=kube-system`. If said service does not exist, [these instructions][12] may help.
|
||||
|
||||
* *You should have `kubectl` installed.* If not, follow the instructions for [installing via Homebrew (MacOS)][10] or [building the binary (Linux)][11].
|
||||
|
||||
### 1. Download
|
||||
Clone or fork the Heptio Ark repo:
|
||||
```
|
||||
git clone git@github.com:heptio/ark.git
|
||||
```
|
||||
|
||||
> NOTE: Documentation may change between releases. See the [Changelog][20] for links to previous versions of this repository and its docs.
|
||||
>
|
||||
> To ensure that you are working off a specific release, `git checkout <VERSION_TAG>` where `<VERSION_TAG>` is the appropriate tag for the Ark version you wish to use (e.g. "v0.3.3"). You should `git checkout master` only if you're planning on [building the Ark image from scratch][7].
|
||||
|
||||
### 2. Setup
|
||||
|
||||
There are two types of Ark instances that work in tandem:
|
||||
1. **Ark server**: Runs persistently on the cluster.
|
||||
2. **Ark client**: Launched by the user whenever they want to initiate an operation (e.g. a backup).
|
||||
|
||||
To get the server started on your cluster (as well as the local storage service), execute the following commands in Ark's root directory:
|
||||
|
||||
```
|
||||
kubectl apply -f examples/common/00-prereqs.yaml
|
||||
kubectl apply -f examples/minio/
|
||||
kubectl apply -f examples/common/10-deployment.yaml
|
||||
```
|
||||
|
||||
*NOTE: If you encounter an error related to Config creation, wait for a minute and run the command again. (The Config CRD does not always finish registering in time.)*
|
||||
|
||||
Now deploy the example nginx app:
|
||||
```
|
||||
kubectl apply -f examples/nginx-app/base.yaml
|
||||
```
|
||||
|
||||
Check to see that both the Ark and nginx deployments have been successfully created:
|
||||
```
|
||||
kubectl get deployments -l component=ark --namespace=heptio-ark
|
||||
kubectl get deployments --namespace=nginx-example
|
||||
```
|
||||
|
||||
Finally, install the Ark client somewhere in your `$PATH`:
|
||||
* [Download a pre-built release][26], or
|
||||
* [Build it from scratch][7]
|
||||
|
||||
|
||||
### 3. Back up and restore
|
||||
First, create a backup specifically for any object matching the `app=nginx` label selector:
|
||||
|
||||
```
|
||||
ark backup create nginx-backup --selector app=nginx
|
||||
```
|
||||
|
||||
Now you can mimic a disaster with the following:
|
||||
```
|
||||
kubectl delete namespace nginx-example
|
||||
```
|
||||
Oh no! The nginx deployment and service are both gone, as you can see (though you may have to wait a minute or two for the namespace to be fully cleaned up):
|
||||
```
|
||||
kubectl get deployments --namespace=nginx-example
|
||||
kubectl get services --namespace=nginx-example
|
||||
```
|
||||
Neither command should yield any results. However, because Ark has your back(up), you can run this command:
|
||||
```
|
||||
ark restore create nginx-backup
|
||||
```
|
||||
|
||||
To check on the status of the Restore:
|
||||
```
|
||||
ark restore get
|
||||
```
|
||||
|
||||
The output should look something like the table below:
|
||||
```
|
||||
NAME BACKUP STATUS WARNINGS ERRORS CREATED SELECTOR
|
||||
nginx-backup-20170727200524 nginx-backup Completed 0 0 2017-07-27 20:05:24 +0000 UTC <none>
|
||||
```
|
||||
|
||||
If the Restore's `STATUS` column is "Completed", and `WARNINGS` and `ERRORS` are both zero, the restore is a success. All of the objects in the `nginx-example` namespace should be just as they were before.
|
||||
|
||||
Otherwise, if there are warnings or errors indicated, you can run the following command to look at them in more detail:
|
||||
```
|
||||
ark restore get <RESTORE NAME> -o yaml
|
||||
```
|
||||
See the [debugging documentation][18] for more details.
|
||||
|
||||
*NOTE*: In the example files, the `storage` volume is defined via `hostPath` for better visibility. If you're curious to see the [structure of the backup files][13] firsthand, you can find the compressed results in `/tmp/minio/ark/nginx-backup`.
|
||||
|
||||
### 4. Tear Down
|
||||
Using the following command, you can remove all Kubernetes objects associated with this example:
|
||||
```
|
||||
kubectl delete -f examples/common/
|
||||
kubectl delete -f examples/minio/
|
||||
kubectl delete -f examples/nginx-app/base.yaml
|
||||
```
|
||||
|
||||
## Architecture

Each of Heptio Ark's operations (Backups, Schedules, and Restores) is a custom resource, defined using [CRDs][20]. Their accompanying [custom controllers][21] handle them when they are submitted to the Kubernetes API server.

As mentioned before, Ark runs in two different modes:

* **Ark client**: Allows you to query, create, and delete the Ark resources as desired.

* **Ark server**: Runs all of the Ark controllers. Each controller watches its respective custom resource for API operations, performs validation, and handles the majority of the cloud API logic (e.g. interfacing with object storage and persistent volumes).

As a specific example, an `ark backup create test-backup` command triggers the following operations:

![19]

1. The *ark client* makes a call to the Kubernetes API server, creating a `Backup` custom resource (which is stored in [etcd][22]); see the sketch after this list.

2. The `BackupController` sees that a new `Backup` has been created, and validates it.

3. Once validation passes, the `BackupController` begins the backup process. It collects data by querying the Kubernetes API Server for resources.

4. Once the data has been aggregated, the `BackupController` makes a call to the object storage service (e.g. Amazon S3) to upload the backup file.

5. By default, Ark also makes disk snapshots of any persistent volumes, using the appropriate cloud service API. (This can be disabled via the option `--snapshot-volumes=false`.)
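
Because a `Backup` is just another custom resource, you can watch step 1 happen directly with `kubectl`. A minimal sketch, assuming the `heptio-ark` namespace from the quickstart above and that the Ark CRDs are registered:

```
# list the Backup custom resources the ark client has created
kubectl get backups --namespace=heptio-ark

# inspect the full object for the example backup
kubectl get backup test-backup --namespace=heptio-ark -o yaml
```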
## Further documentation
|
||||
|
||||
To learn more about Heptio Ark operations and their applications, see the [`/docs` directory][3].
|
||||
[The documentation][29] provides a getting started guide, plus information about building from source, architecture, extending Ark, and more.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you encounter any problems that the documentation does not address, [file an issue][4] or talk to us on the [Kubernetes Slack team][25] channel `#ark-dr`.
|
||||
If you encounter issues, review the [troubleshooting docs][30], [file an issue][4], or talk to us on the [#ark-dr channel][25] on the Kubernetes Slack server.
|
||||
|
||||
## Contributing
|
||||
|
||||
Thanks for taking the time to join our community and start contributing!
|
||||
|
||||
Feedback and discussion is available on [the mailing list][24].
|
||||
Feedback and discussion are available on [the mailing list][24].
|
||||
|
||||
#### Before you start
|
||||
### Before you start
|
||||
|
||||
* Please familiarize yourself with the [Code of Conduct][8] before contributing.
|
||||
* See [CONTRIBUTING.md][5] for instructions on the developer certificate of origin that we require.
|
||||
* Read how [we're using ZenHub][26] for project and roadmap planning
|
||||
|
||||
#### Pull requests
|
||||
### Pull requests
|
||||
|
||||
* We welcome pull requests. Feel free to dig through the [issues][4] and jump in.
|
||||
|
||||
|
||||
## Changelog
|
||||
|
||||
See [the list of releases][6] to find out about feature changes.
|
||||
|
||||
[0]: https://github.com/heptio
|
||||
[1]: https://jenkins.i.heptio.com/buildStatus/icon?job=ark-master
|
||||
[2]: https://jenkins.i.heptio.com/job/ark-master/
|
||||
[3]: /docs
|
||||
[1]: https://travis-ci.org/heptio/ark.svg?branch=master
|
||||
[2]: https://travis-ci.org/heptio/ark
|
||||
|
||||
[4]: https://github.com/heptio/ark/issues
|
||||
[5]: /CONTRIBUTING.md
|
||||
[6]: /CHANGELOG.md
|
||||
[7]: /docs/build-from-scratch.md
|
||||
[8]: /CODE_OF_CONDUCT.md
|
||||
[5]: https://github.com/heptio/ark/blob/master/CONTRIBUTING.md
|
||||
[6]: https://github.com/heptio/ark/releases
|
||||
|
||||
[8]: https://github.com/heptio/ark/blob/master/CODE_OF_CONDUCT.md
|
||||
[9]: https://kubernetes.io/docs/setup/
|
||||
[10]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-with-homebrew-on-macos
|
||||
[11]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#tabset-1
|
||||
[12]: https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/README.md
|
||||
[13]: /docs/output-file-format.md
|
||||
[14]: https://github.com/kubernetes/kubernetes
|
||||
[15]: https://aws.amazon.com/
|
||||
[16]: https://cloud.google.com/
|
||||
[17]: https://azure.microsoft.com/
|
||||
[18]: /docs/debugging-restores.md
|
||||
[19]: /docs/img/backup-process.png
|
||||
[20]: https://kubernetes.io/docs/concepts/api-extension/custom-resources/#customresourcedefinitions
|
||||
[21]: https://kubernetes.io/docs/concepts/api-extension/custom-resources/#custom-controllers
|
||||
[22]: https://github.com/coreos/etcd
|
||||
[23]: /docs/cloud-provider-specifics.md
|
||||
|
||||
|
||||
[24]: http://j.hept.io/ark-list
|
||||
[25]: http://slack.kubernetes.io/
|
||||
[26]: https://github.com/heptio/ark/releases
|
||||
[25]: https://kubernetes.slack.com/messages/ark-dr
|
||||
[26]: https://github.com/heptio/ark/blob/master/docs/zenhub.md
|
||||
|
||||
|
||||
[29]: https://heptio.github.io/ark/
|
||||
[30]: /docs/troubleshooting.md
|
||||
|
||||
[98]: /docs/upgrading-to-v0.10.md
|
||||
[99]: /docs/support-matrix.md
|
||||
249  changelogs/CHANGELOG-0.10.md  (new file)
@@ -0,0 +1,249 @@
|
||||
- [v0.10.1](#v0101)
|
||||
- [v0.10.0](#v0100)
|
||||
|
||||
## v0.10.1
|
||||
#### 2019-01-10
|
||||
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.10.1
|
||||
|
||||
### Changes
|
||||
* Fix minio setup job command (#1118, @acbramley)
|
||||
* Add debugging-install link in doc get-started.md (#1131, @hex108)
|
||||
* `ark version`: show full git SHA & combine git tree state indicator with git SHA line (#1124, @skriss)
|
||||
* Delete spec.priority in pod restore action (#879, @mwieczorek)
|
||||
* Allow to use AWS Signature v1 for creating signed AWS urls (#811, @bashofmann)
|
||||
* add multizone/regional support to gcp (#765, @wwitzel3)
|
||||
* Fixed the newline output when deleting a schedule. (#1120, @jwhitcraft)
|
||||
* Remove obsolete make targets and rename 'make goreleaser' to 'make release' (#1114, @skriss)
|
||||
* Update to go 1.11 (#1069, @gliptak)
|
||||
* Update CHANGELOGs (#1063, @wwitzel3)
|
||||
* Initialize empty schedule metrics on server init (#1054, @cbeneke)
|
||||
* Added brew reference (#1051, @omerlh)
|
||||
* Remove default token from all service accounts (#1048, @ncdc)
|
||||
* Add pprof support to the Ark server (#234, @ncdc)
|
||||
|
||||
## v0.10.0
|
||||
#### 2018-11-15
|
||||
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.10.0
|
||||
|
||||
### Highlights
- We've introduced two new custom resource definitions, `BackupStorageLocation` and `VolumeSnapshotLocation`, that replace the `Config` CRD from
previous versions. As part of this, you may now configure more than one possible location for where backups and snapshots are stored, and when you
create a `Backup` you can select the location where you'd like that particular backup to be stored. See the [Locations documentation][2] for an overview
of this feature.
- Ark's plugin system has been significantly refactored to improve robustness and ease of development. Plugin processes are now automatically restarted
if they unexpectedly terminate. Additionally, plugin binaries can now contain more than one plugin implementation (e.g. an object store *and* a block store,
or many backup item actions).
- The sync process, which ensures that Backup custom resources exist for each backup in object storage, has been revamped to run much more frequently (once
per minute rather than once per hour), to use significantly fewer cloud provider API calls, and to not generate spurious Kubernetes API errors.
- Ark can now be configured to store all data under a prefix within an object storage bucket. This means that you no longer need a separate bucket per Ark
instance; you can now have all of your clusters' Ark backups go into a single bucket, with each cluster having its own prefix/subdirectory
within that bucket.
- Restic backup data is now automatically stored within the same bucket/prefix as the rest of the Ark data. A separate bucket is no longer required (or allowed).
- Ark resources (backups, restores, schedules) can now be bulk-deleted through the `ark` CLI, using the `--all` or `--selector` flags, or by specifying
multiple resource names as arguments to the `delete` commands.
- The `ark` CLI now supports waiting for backups and restores to complete with the `--wait` flag for `ark backup create` and `ark restore create`
- Restores can be created directly from the most recent backup for a schedule, using `ark restore create --from-schedule SCHEDULE_NAME`
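
Taken together, the CLI additions above look roughly like this in practice (the backup, schedule, and location names here are invented for illustration):

```
# back up to a named storage location and wait for completion
ark backup create nginx-backup --storage-location default --wait

# restore from the most recent backup created by a schedule
ark restore create --from-schedule daily-nginx

# bulk-delete by label selector, or delete everything of a kind
ark backup delete --selector app=nginx
ark schedule delete --all
```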
### Breaking Changes

Heptio Ark v0.10 contains a number of breaking changes. Upgrading will require some additional steps beyond just updating your client binary and your
container image tag. We've provided a [detailed set of instructions][1] to help you with the upgrade process. **Please read and follow these instructions
carefully to ensure a successful upgrade!**

- The `Config` CRD has been replaced by `BackupStorageLocation` and `VolumeSnapshotLocation` CRDs.
- The interface for external plugins (object/block stores, backup/restore item actions) has changed. If you have authored any custom plugins, they'll
need to be updated for v0.10.
- The [`ObjectStore.ListCommonPrefixes`](https://github.com/heptio/ark/blob/master/pkg/cloudprovider/object_store.go#L50) signature has changed to add a `prefix` parameter.
- Registering plugins has changed. Create a new plugin server with the `NewServer` function, and register plugins with the appropriate functions. See the [`Server`](https://github.com/heptio/ark/blob/master/pkg/plugin/server.go#L37) interface for details.
- The organization of Ark data in object storage has changed. Existing data will need to be moved around to conform to the new layout.

### All Changes
|
||||
- [b9de44ff](https://github.com/heptio/ark/commit/b9de44ff) update docs to reference config/ dir within release tarballs
|
||||
- [eace0255](https://github.com/heptio/ark/commit/eace0255) goreleaser: update example image tags to match version being released
|
||||
- [cff02159](https://github.com/heptio/ark/commit/cff02159) add rbac content, rework get-started for NodePort and publicUrl, add versioning information
|
||||
- [fa14255e](https://github.com/heptio/ark/commit/fa14255e) add content for docs issue 819
|
||||
- [22959071](https://github.com/heptio/ark/commit/22959071) add doc explaining locations
|
||||
- [e5556fe6](https://github.com/heptio/ark/commit/e5556fe6) Added qps and burst to server's client
|
||||
- [9ae861c9](https://github.com/heptio/ark/commit/9ae861c9) Support a separate URL base for pre-signed URLs
|
||||
- [698420b6](https://github.com/heptio/ark/commit/698420b6) Update storage-layout-reorg-v0.10.md
|
||||
- [6c9e1f18](https://github.com/heptio/ark/commit/6c9e1f18) lower some noisy logs to debug level
|
||||
- [318fd8a8](https://github.com/heptio/ark/commit/318fd8a8) add troubleshooting for loadbalancer restores
|
||||
- [defb8aa8](https://github.com/heptio/ark/commit/defb8aa8) remove code that checks directly for a backup from restore controller
|
||||
- [7abe1156](https://github.com/heptio/ark/commit/7abe1156) Move clearing up of metadata before plugin's actions
|
||||
- [ec013e6f](https://github.com/heptio/ark/commit/ec013e6f) Document upgrading plugins in the deployment
|
||||
- [d6162e94](https://github.com/heptio/ark/commit/d6162e94) fix goreleaser bugs
|
||||
- [a15df276](https://github.com/heptio/ark/commit/a15df276) Add correct link and change role
|
||||
- [46bed015](https://github.com/heptio/ark/commit/46bed015) add 0.10 breaking changes warning to readme in master
|
||||
- [e3a7d6a2](https://github.com/heptio/ark/commit/e3a7d6a2) add content for issue 994
|
||||
- [400911e9](https://github.com/heptio/ark/commit/400911e9) address docs issue #978
|
||||
- [b818cc27](https://github.com/heptio/ark/commit/b818cc27) don't require a default provider VSL if there's only 1
|
||||
- [90638086](https://github.com/heptio/ark/commit/90638086) v0.10 changelog
|
||||
- [6e2166c4](https://github.com/heptio/ark/commit/6e2166c4) add docs page on versions and upgrading
|
||||
- [18b434cb](https://github.com/heptio/ark/commit/18b434cb) goreleaser scripts for building/creating a release on a workstation
|
||||
- [bb65d67a](https://github.com/heptio/ark/commit/bb65d67a) update restic prerequisite with min k8s version
|
||||
- [b5a2ccd5](https://github.com/heptio/ark/commit/b5a2ccd5) Silence git detached HEAD advice in build container
|
||||
- [67749141](https://github.com/heptio/ark/commit/67749141) instructions for upgrading to v0.10
|
||||
- [516422c2](https://github.com/heptio/ark/commit/516422c2) sync controller: fill in missing .spec.storageLocation
|
||||
- [195e6aaf](https://github.com/heptio/ark/commit/195e6aaf) fix bug preventing PV snapshots from v0.10 backups from restoring
|
||||
- [bca58516](https://github.com/heptio/ark/commit/bca58516) Run 'make update' to update formatting
|
||||
- [573ce7d0](https://github.com/heptio/ark/commit/573ce7d0) Update formatting script
|
||||
- [90d9be59](https://github.com/heptio/ark/commit/90d9be59) support restoring/deleting legacy backups with .status.volumeBackups
|
||||
- [ef194972](https://github.com/heptio/ark/commit/ef194972) rename variables #967
|
||||
- [6d4e702c](https://github.com/heptio/ark/commit/6d4e702c) fix broken link
|
||||
- [596eea1b](https://github.com/heptio/ark/commit/596eea1b) restore storageclasses before pvs and pvcs
|
||||
- [f014cab1](https://github.com/heptio/ark/commit/f014cab1) backup describer: show snapshot summary by default, details optionally
|
||||
- [8acc66d0](https://github.com/heptio/ark/commit/8acc66d0) remove pvProviderExists param from NewRestoreController
|
||||
- [57ce590f](https://github.com/heptio/ark/commit/57ce590f) create a struct for multiple return of same type in restore_contoroller #967
|
||||
- [028fafb6](https://github.com/heptio/ark/commit/028fafb6) Corrected grammatical error
|
||||
- [db856aff](https://github.com/heptio/ark/commit/db856aff) Specify return arguments
|
||||
- [9952dfb0](https://github.com/heptio/ark/commit/9952dfb0) Address #424: Add CRDs to list of prioritized resources
|
||||
- [cf2c2714](https://github.com/heptio/ark/commit/cf2c2714) fix bugs in GetBackupVolumeSnapshots and add test
|
||||
- [ec124673](https://github.com/heptio/ark/commit/ec124673) remove all references to Config from docs/examples
|
||||
- [c36131a0](https://github.com/heptio/ark/commit/c36131a0) remove Config-related code
|
||||
- [406b50a7](https://github.com/heptio/ark/commit/406b50a7) update restore process using snapshot locations
|
||||
- [268080ad](https://github.com/heptio/ark/commit/268080ad) avoid panics if can't get block store during deletion
|
||||
- [4a03370f](https://github.com/heptio/ark/commit/4a03370f) update backup deletion controller for snapshot locations
|
||||
- [38c72b8c](https://github.com/heptio/ark/commit/38c72b8c) include snapshot locations in created schedule's backup spec
|
||||
- [0ec2de55](https://github.com/heptio/ark/commit/0ec2de55) azure: update blockstore to allow storing snaps in different resource group
|
||||
- [35bb533c](https://github.com/heptio/ark/commit/35bb533c) close gzip writer before uploading volumesnapshots file
|
||||
- [da9ed38c](https://github.com/heptio/ark/commit/da9ed38c) store volume snapshot info as JSON in backup storage
|
||||
- [e24248e0](https://github.com/heptio/ark/commit/e24248e0) add --volume-snapshot-locations flag to ark backup create
|
||||
- [df07b7dc](https://github.com/heptio/ark/commit/df07b7dc) update backup code to work with volume snapshot locations
|
||||
- [4af89fa8](https://github.com/heptio/ark/commit/4af89fa8) add unit test for getDefaultVolumeSnapshotLocations
|
||||
- [02f50b9c](https://github.com/heptio/ark/commit/02f50b9c) add default-volume-snapshot-locations to server cmd
|
||||
- [1aa712d2](https://github.com/heptio/ark/commit/1aa712d2) Default and validate VolumeSnapshotLocations
|
||||
- [bbf76985](https://github.com/heptio/ark/commit/bbf76985) add create CLI command for snapshot locations
|
||||
- [aeb221ea](https://github.com/heptio/ark/commit/aeb221ea) Add printer for snapshot locations
|
||||
- [ffc612ac](https://github.com/heptio/ark/commit/ffc612ac) Add volume snapshot CLI get command
|
||||
- [f20342aa](https://github.com/heptio/ark/commit/f20342aa) Add VolumeLocation and Snapshot.
|
||||
- [7172db8a](https://github.com/heptio/ark/commit/7172db8a) upgrade to restic v0.9.3
|
||||
- [99adc4fa](https://github.com/heptio/ark/commit/99adc4fa) Remove broken references to docs that are not existing
|
||||
- [474efde6](https://github.com/heptio/ark/commit/474efde6) Fixed relative link for image
|
||||
- [41735154](https://github.com/heptio/ark/commit/41735154) don't require a default backup storage location to exist
|
||||
- [0612c5de](https://github.com/heptio/ark/commit/0612c5de) templatize error message in DeleteOptions
|
||||
- [66bcbc05](https://github.com/heptio/ark/commit/66bcbc05) add support for bulk deletion to ark schedule delete
|
||||
- [3af43b49](https://github.com/heptio/ark/commit/3af43b49) add azure-specific code to support multi-location restic
|
||||
- [d009163b](https://github.com/heptio/ark/commit/d009163b) update restic to support multiple backup storage locations
|
||||
- [f4c99c77](https://github.com/heptio/ark/commit/f4c99c77) Change link for the support matrix
|
||||
- [91e45d56](https://github.com/heptio/ark/commit/91e45d56) Fix broken storage providers link
|
||||
- [ed0eb865](https://github.com/heptio/ark/commit/ed0eb865) fix backup storage location example YAMLs
|
||||
- [eb709b8f](https://github.com/heptio/ark/commit/eb709b8f) only sync a backup location if it's changed since last sync
|
||||
- [af3af1b5](https://github.com/heptio/ark/commit/af3af1b5) clarify Azure resource group usage in docs
|
||||
- [9fdf8513](https://github.com/heptio/ark/commit/9fdf8513) Minor code cleanup
|
||||
- [2073e15a](https://github.com/heptio/ark/commit/2073e15a) Fix formatting for live site
|
||||
- [0fc3e8d8](https://github.com/heptio/ark/commit/0fc3e8d8) add documentation on running Ark on-premises
|
||||
- [e46e89cb](https://github.com/heptio/ark/commit/e46e89cb) have restic share main Ark bucket
|
||||
- [42b54586](https://github.com/heptio/ark/commit/42b54586) refactor to make valid dirs part of an object store layout
|
||||
- [8bc7e4f6](https://github.com/heptio/ark/commit/8bc7e4f6) store backups & restores in backups/, restores/ subdirs in obj storage
|
||||
- [e3232b7e](https://github.com/heptio/ark/commit/e3232b7e) add support for bulk deletion to ark restore delete
|
||||
- [17be71e1](https://github.com/heptio/ark/commit/17be71e1) remove deps used for docs gen
|
||||
- [20635106](https://github.com/heptio/ark/commit/20635106) remove script for generating docs
|
||||
- [6fd9ea9d](https://github.com/heptio/ark/commit/6fd9ea9d) remove cli reference docs and related scripts
|
||||
- [4833607a](https://github.com/heptio/ark/commit/4833607a) Fix infinite sleep in fsfreeze container
|
||||
- [7668bfd4](https://github.com/heptio/ark/commit/7668bfd4) Add links for Portworx plugin support
|
||||
- [468006e6](https://github.com/heptio/ark/commit/468006e6) Fix Portworx name in doc
|
||||
- [e6b44539](https://github.com/heptio/ark/commit/e6b44539) Make fsfreeze image building consistent
|
||||
- [fcd27a13](https://github.com/heptio/ark/commit/fcd27a13) get a new metadata accessor after calling backup item actions
|
||||
- [ffef86e3](https://github.com/heptio/ark/commit/ffef86e3) Adding support for the AWS_CLUSTER_NAME env variable allowing to claim volumes ownership
|
||||
- [cda3dff8](https://github.com/heptio/ark/commit/cda3dff8) Document single binary plugins
|
||||
- [f049e078](https://github.com/heptio/ark/commit/f049e078) Remove ROADMAP.md, update ZenHub link to Ark board
|
||||
- [94617b30](https://github.com/heptio/ark/commit/94617b30) convert all controllers to use genericController, logContext -> log
|
||||
- [779cb428](https://github.com/heptio/ark/commit/779cb428) Document SignatureDoesNotMatch error and triaging
|
||||
- [7d8813a9](https://github.com/heptio/ark/commit/7d8813a9) move ObjectStore mock into pkg/cloudprovider/mocks
|
||||
- [f0edf733](https://github.com/heptio/ark/commit/f0edf733) add a BackupStore to pkg/persistence that supports prefixes
|
||||
- [af64069d](https://github.com/heptio/ark/commit/af64069d) create pkg/persistence and move relevant code from pkg/cloudprovider into it
|
||||
- [29d75d72](https://github.com/heptio/ark/commit/29d75d72) move object and block store interfaces to their own files
|
||||
- [211aa7b7](https://github.com/heptio/ark/commit/211aa7b7) Set schedule labels to subsequent backups
|
||||
- [d34994cb](https://github.com/heptio/ark/commit/d34994cb) set azure restic env vars based on default backup location's config
|
||||
- [a50367f1](https://github.com/heptio/ark/commit/a50367f1) Regenerate CLI docs
|
||||
- [7bc27bbb](https://github.com/heptio/ark/commit/7bc27bbb) Pin cobra version
|
||||
- [e94277ac](https://github.com/heptio/ark/commit/e94277ac) Update pflag version
|
||||
- [df69b274](https://github.com/heptio/ark/commit/df69b274) azure: update documentation and examples
|
||||
- [cb321db2](https://github.com/heptio/ark/commit/cb321db2) azure: refactor to not use helpers/ pkg, validate all env/config inputs
|
||||
- [9d7ea748](https://github.com/heptio/ark/commit/9d7ea748) azure: support different RGs/storage accounts per backup location
|
||||
- [cd4e9f53](https://github.com/heptio/ark/commit/cd4e9f53) azure: fix for breaking change in blob.GetSASURI
|
||||
- [a440029c](https://github.com/heptio/ark/commit/a440029c) bump Azure SDK version and include storage mgmt package
|
||||
- [b31e25bf](https://github.com/heptio/ark/commit/b31e25bf) server: remove unused code, replace deprecated func
|
||||
- [729d7339](https://github.com/heptio/ark/commit/729d7339) controllers: take a newPluginManager func in constructors
|
||||
- [6445dbf1](https://github.com/heptio/ark/commit/6445dbf1) Update examples and docs for backup locations
|
||||
- [133dc185](https://github.com/heptio/ark/commit/133dc185) backup sync: process the default location first
|
||||
- [7a1e6d16](https://github.com/heptio/ark/commit/7a1e6d16) generic controller: allow controllers with only a resync func
|
||||
- [6f7bfe54](https://github.com/heptio/ark/commit/6f7bfe54) remove Config CRD's BackupStorageProvider & other obsolete code
|
||||
- [bd4d97b9](https://github.com/heptio/ark/commit/bd4d97b9) move server's defaultBackupLocation into config struct
|
||||
- [0e94fa37](https://github.com/heptio/ark/commit/0e94fa37) update sync controller for backup locations
|
||||
- [2750aa71](https://github.com/heptio/ark/commit/2750aa71) Use backup storage location during restore
|
||||
- [20f89fbc](https://github.com/heptio/ark/commit/20f89fbc) use the default backup storage location for restic
|
||||
- [833a6307](https://github.com/heptio/ark/commit/833a6307) Add storage location to backup get/describe
|
||||
- [cf7c8587](https://github.com/heptio/ark/commit/cf7c8587) download request: fix setting of log level for plugin manager
|
||||
- [3234124a](https://github.com/heptio/ark/commit/3234124a) backup deletion: fix setting of log level in plugin manager
|
||||
- [74043ab4](https://github.com/heptio/ark/commit/74043ab4) download request controller: fix bug in determining expiration
|
||||
- [7007f198](https://github.com/heptio/ark/commit/7007f198) refactor download request controller test and add test cases
|
||||
- [8f534615](https://github.com/heptio/ark/commit/8f534615) download request controller: use backup location for object store
|
||||
- [bab08ed1](https://github.com/heptio/ark/commit/bab08ed1) backup deletion controller: use backup location for object store
|
||||
- [c6f488f7](https://github.com/heptio/ark/commit/c6f488f7) Use backup location in the backup controller
|
||||
- [06b5af44](https://github.com/heptio/ark/commit/06b5af44) add create and get CLI commands for backup locations
|
||||
- [adbcd370](https://github.com/heptio/ark/commit/adbcd370) add --default-backup-storage-location flag to server cmd
|
||||
- [2a34772e](https://github.com/heptio/ark/commit/2a34772e) Add --storage-location argument to create commands
|
||||
- [56f16170](https://github.com/heptio/ark/commit/56f16170) Correct metadata for BackupStorageLocationList
|
||||
- [345c3c39](https://github.com/heptio/ark/commit/345c3c39) Generate clients for BackupStorageLocation
|
||||
- [a25eb032](https://github.com/heptio/ark/commit/a25eb032) Add BackupStorageLocation API type
|
||||
- [575c4ddc](https://github.com/heptio/ark/commit/575c4ddc) apply annotations on single line, no restore mode
|
||||
- [030ea6c0](https://github.com/heptio/ark/commit/030ea6c0) minor word updates and command wrapping
|
||||
- [d32f8dbb](https://github.com/heptio/ark/commit/d32f8dbb) Update hooks/fsfreeze example
|
||||
- [342a1c64](https://github.com/heptio/ark/commit/342a1c64) add an ark bug command
|
||||
- [9c11ba90](https://github.com/heptio/ark/commit/9c11ba90) Add DigitalOcean to S3-compatible backup providers
|
||||
- [ea50ebf2](https://github.com/heptio/ark/commit/ea50ebf2) Fix map merging logic
|
||||
- [9508e4a2](https://github.com/heptio/ark/commit/9508e4a2) Switch Config CRD elements to server flags
|
||||
- [0c3ac67b](https://github.com/heptio/ark/commit/0c3ac67b) start using a namespaced label on restored objects, deprecate old label
|
||||
- [6e53aa03](https://github.com/heptio/ark/commit/6e53aa03) Bring back 'make local'
|
||||
- [5acccaa7](https://github.com/heptio/ark/commit/5acccaa7) add bulk deletion support to ark backup delete
|
||||
- [3aa241a7](https://github.com/heptio/ark/commit/3aa241a7) Preserve node ports during restore when annotations hold specification.
|
||||
- [c5f5862c](https://github.com/heptio/ark/commit/c5f5862c) Add --wait support to ark backup create
|
||||
- [eb6f742b](https://github.com/heptio/ark/commit/eb6f742b) Document CRD not found errors
|
||||
- [fb4d507c](https://github.com/heptio/ark/commit/fb4d507c) Extend doc about synchronization
|
||||
- [e7bb5926](https://github.com/heptio/ark/commit/e7bb5926) Add --wait support to `ark restore create`
|
||||
- [8ce513ac](https://github.com/heptio/ark/commit/8ce513ac) Only delete unused backup if they are complete
|
||||
- [1c26fbde](https://github.com/heptio/ark/commit/1c26fbde) remove SnapshotService, replace with direct BlockStore usage
|
||||
- [13051218](https://github.com/heptio/ark/commit/13051218) Refactor plugin management
|
||||
- [74dbf387](https://github.com/heptio/ark/commit/74dbf387) Add restore failed phase and metrics
|
||||
- [8789ae5c](https://github.com/heptio/ark/commit/8789ae5c) update testify to latest released version
|
||||
- [fe9d61a9](https://github.com/heptio/ark/commit/fe9d61a9) Add schedule command info to quickstart
|
||||
- [ca5656c2](https://github.com/heptio/ark/commit/ca5656c2) fix bug preventing backup item action item updates from saving
|
||||
- [d2e629f5](https://github.com/heptio/ark/commit/d2e629f5) Delete backups from etcd if they're not in storage
|
||||
- [625ba481](https://github.com/heptio/ark/commit/625ba481) Fix ZenHub link on Readme.md
|
||||
- [dcae6eb0](https://github.com/heptio/ark/commit/dcae6eb0) Update gcp-config.md
|
||||
- [06d6665a](https://github.com/heptio/ark/commit/06d6665a) check s3URL scheme upon AWS ObjectStore Init()
|
||||
- [cc359f6e](https://github.com/heptio/ark/commit/cc359f6e) Add contributor docs for our ZenHub usage
|
||||
- [f6204562](https://github.com/heptio/ark/commit/f6204562) cleanup service account action log statement
|
||||
- [450fa72f](https://github.com/heptio/ark/commit/450fa72f) Initialize schedule Prometheus metrics to have them created beforehand (see https://prometheus.io/docs/practices/instrumentation/#avoid-missing-metrics)
|
||||
- [39c4267a](https://github.com/heptio/ark/commit/39c4267a) Clarify that object storage should per-cluster
|
||||
- [78cbdf95](https://github.com/heptio/ark/commit/78cbdf95) delete old deletion requests for backup when processing a new one
|
||||
- [85a61b8e](https://github.com/heptio/ark/commit/85a61b8e) return nil error if 404 encountered when deleting snapshots
|
||||
- [a2a7dbda](https://github.com/heptio/ark/commit/a2a7dbda) fix tagging latest by using make's ifeq
|
||||
- [b4a52e45](https://github.com/heptio/ark/commit/b4a52e45) Add commands for context to the bug template
|
||||
- [3efe6770](https://github.com/heptio/ark/commit/3efe6770) Update Ark library code to work with Kubernetes 1.11
|
||||
- [7e8c8c69](https://github.com/heptio/ark/commit/7e8c8c69) Add some basic troubleshooting commands
|
||||
- [d1955120](https://github.com/heptio/ark/commit/d1955120) require namespace for backups/etc. to exist at server startup
|
||||
- [683f7afc](https://github.com/heptio/ark/commit/683f7afc) switch to using .status.startTimestamp for sorting backups
|
||||
- [b71a37db](https://github.com/heptio/ark/commit/b71a37db) Record backup completion time before uploading
|
||||
- [217084cd](https://github.com/heptio/ark/commit/217084cd) Add example ark version command to issue templates
|
||||
- [040788bb](https://github.com/heptio/ark/commit/040788bb) Add minor improvements and aws example
|
||||
- [5b89f7b6](https://github.com/heptio/ark/commit/5b89f7b6) Skip backup sync if it already exists in k8s
|
||||
- [c6050845](https://github.com/heptio/ark/commit/c6050845) restore controller: switch to 'c' for receiver name
|
||||
- [706ae07d](https://github.com/heptio/ark/commit/706ae07d) enable a schedule to be provided as the source for a restore
|
||||
- [aea68414](https://github.com/heptio/ark/commit/aea68414) fix up Slack link in troubleshooting on master branch
|
||||
- [bb8e2e91](https://github.com/heptio/ark/commit/bb8e2e91) Document how to run the Ark server locally
|
||||
- [dc84e591](https://github.com/heptio/ark/commit/dc84e591) Remove outdated namespace deletion content
|
||||
- [23abbc9a](https://github.com/heptio/ark/commit/23abbc9a) fix paths
|
||||
- [f0426538](https://github.com/heptio/ark/commit/f0426538) use posix-compliant conditional for checking TAG_LATEST
|
||||
- [cf336d80](https://github.com/heptio/ark/commit/cf336d80) Added new templates
|
||||
- [795dc262](https://github.com/heptio/ark/commit/795dc262) replace pkg/restore's osFileSystem with pkg/util/filesystem's
|
||||
- [eabef085](https://github.com/heptio/ark/commit/eabef085) Update generated Ark code based on the 1.11 k8s.io/code-generator script
|
||||
- [f5eac0b4](https://github.com/heptio/ark/commit/f5eac0b4) Update vendored library code for Kubernetes 1.11
|
||||
|
||||
[1]: https://github.com/heptio/ark/blob/master/docs/upgrading-to-v0.10.md
|
||||
[2]: locations.md
|
||||
39
changelogs/CHANGELOG-0.3.md
Normal file
@@ -0,0 +1,39 @@
|
||||
- [v0.3.3](#v033)
|
||||
- [v0.3.2](#v032)
|
||||
- [v0.3.1](#v031)
|
||||
- [v0.3.0](#v030)
|
||||
|
||||
## v0.3.3
|
||||
#### 2017-08-10
|
||||
### Download
|
||||
- https://github.com/heptio/ark/tree/v0.3.3
|
||||
|
||||
### Bug Fixes
|
||||
* Treat the first field in a schedule's cron expression as minutes, not seconds
|
||||
|
||||
|
||||
## v0.3.2
|
||||
#### 2017-08-07
|
||||
### Download
|
||||
- https://github.com/heptio/ark/tree/v0.3.2
|
||||
|
||||
### New Features
|
||||
* Add client-go auth provider plugins for Azure, GCP, OIDC
|
||||
|
||||
|
||||
## v0.3.1
|
||||
#### 2017-08-03
|
||||
### Download
|
||||
- https://github.com/heptio/ark/tree/v0.3.1
|
||||
|
||||
### Bug Fixes
|
||||
* Fix Makefile VERSION
|
||||
|
||||
|
||||
## v0.3.0
|
||||
#### 2017-08-03
|
||||
### Download
|
||||
- https://github.com/heptio/ark/tree/v0.3.0
|
||||
|
||||
### All New Features
|
||||
* Initial Release
|
||||
29
changelogs/CHANGELOG-0.4.md
Normal file
@@ -0,0 +1,29 @@
|
||||
- [v0.4.0](#v040)
|
||||
|
||||
## v0.4.0
|
||||
#### 2017-09-14
|
||||
### Download
|
||||
- https://github.com/heptio/ark/tree/v0.4.0
|
||||
|
||||
### Breaking changes
|
||||
* Snapshotting and restoring volumes is now enabled by default
|
||||
* The --namespaces flag for 'ark restore create' has been replaced by --include-namespaces and
|
||||
--exclude-namespaces
|
||||
|
||||
### New features
|
||||
* Support for S3 SSE with KMS
|
||||
* Cloud provider configurations are validated at startup
|
||||
* The persistentVolumeProvider is now optional
|
||||
* Restore objects are garbage collected
|
||||
* Each backup now has an associated log file, viewable via 'ark backup logs'
|
||||
* Each restore now has an associated log file, viewable via 'ark restore logs'
|
||||
* Add --include-resources/--exclude-resources for restores
|
||||
|
||||
### Bug fixes
|
||||
* Only save/use iops for io1 volumes on AWS
|
||||
* When restoring, try to retrieve the Backup directly from object storage if it's not found
|
||||
* When syncing Backups from object storage to Kubernetes, don't return at the first error
|
||||
encountered
|
||||
* More closely match how kubectl performs kubeconfig resolution
|
||||
* Increase default Azure API request timeout to 2 minutes
|
||||
* Update Azure diskURI to match diskName
|
||||
41
changelogs/CHANGELOG-0.5.md
Normal file
@@ -0,0 +1,41 @@
|
||||
- [v0.5.1](#v051)
|
||||
- [v0.5.0](#v050)
|
||||
|
||||
## v0.5.1
|
||||
#### 2017-11-06
|
||||
### Download
|
||||
- https://github.com/heptio/ark/tree/v0.5.1
|
||||
|
||||
### Bug fixes
|
||||
* If a Service is headless, retain ClusterIP = None when backing up and restoring.
|
||||
* Use the specified --label-selector when listing backups, schedules, and restores.
|
||||
* Restore namespace mapping functionality that was accidentally broken in 0.5.0.
|
||||
* Always include namespaces in the backup, regardless of the --include-cluster-resources setting.
|
||||
|
||||
|
||||
## v0.5.0
|
||||
#### 2017-10-26
|
||||
### Download
|
||||
- https://github.com/heptio/ark/tree/v0.5.0
|
||||
|
||||
### Breaking changes
|
||||
* The backup tar file format has changed. Backups created using previous versions of Ark cannot be restored using v0.5.0.
|
||||
* When backing up one or more specific namespaces, cluster-scoped resources are no longer backed up by default, with the exception of PVs that are used within the target namespace(s). Cluster-scoped resources can still be included by explicitly specifying `--include-cluster-resources`.
|
||||
|
||||
### New features
|
||||
* Add customized user-agent string for Ark CLI
|
||||
* Switch from glog to logrus
|
||||
* Exclude nodes from restoration
|
||||
* Add a FAQ
|
||||
* Record PV availability zone and use it when restoring volumes from snapshots
|
||||
* Back up the PV associated with a PVC
|
||||
* Add `--include-cluster-resources` flag to `ark backup create`
|
||||
* Add `--include-cluster-resources` flag to `ark restore create`
|
||||
* Properly support resource restore priorities across cluster-scoped and namespace-scoped resources
|
||||
* Support `ark create ...` and `ark get ...`
|
||||
* Make ark run as cluster-admin
|
||||
* Add pod exec backup hooks
|
||||
* Support cross-compilation & upgrade to go 1.9
|
||||
|
||||
### Bug fixes
|
||||
* Make config change detection more robust
|
||||
31
changelogs/CHANGELOG-0.6.md
Normal file
@@ -0,0 +1,31 @@
|
||||
- [v0.6.0](#v060)
|
||||
|
||||
## v0.6.0
|
||||
#### 2017-11-30
|
||||
### Download
|
||||
- https://github.com/heptio/ark/tree/v0.6.0
|
||||
|
||||
### Highlights
|
||||
* **Plugins** - We now support user-defined plugins that can extend Ark functionality to meet your custom backup/restore needs without needing to be compiled into the core binary. We support pluggable block and object stores as well as per-item backup and restore actions that can execute arbitrary logic, including modifying the items being backed up or restored. For more information see the [documentation](docs/plugins.md), which includes a reference to a fully-functional sample plugin repository. (#174 #188 #206 #213 #215 #217 #223 #226)
|
||||
* **Describers** - The Ark CLI now includes `describe` commands for `backups`, `restores`, and `schedules` that provide human-friendly representations of the relevant API objects.
|
||||
|
||||
### Breaking Changes
|
||||
* The config object format has changed. In order to upgrade to v0.6.0, the config object will have to be updated to match the new format. See the [examples](examples) and [documentation](docs/config-definition.md) for more information.
|
||||
* The restore object format has changed. The `warnings` and `errors` fields are now ints containing the counts, while full warnings and errors are now stored in the object store instead of etcd. Restore objects created prior to v.0.6.0 should be deleted, or a new bucket used, and the old restore objects deleted from Kubernetes (`kubectl -n heptio-ark delete restore --all`).
|
||||
|
||||
### All New Features
|
||||
* Add `ark plugin add` and `ark plugin remove` commands #217, @skriss
|
||||
* Add plugin support for block/object stores, backup/restore item actions #174 #188 #206 #213 #215 #223 #226, @skriss @ncdc
|
||||
* Improve Azure deployment instructions #216, @ncdc
|
||||
* Change default TTL for backups to 30 days #204, @nrb
|
||||
* Improve logging for backups and restores #199, @ncdc
|
||||
* Add `ark backup describe`, `ark schedule describe` #196, @ncdc
|
||||
* Add `ark restore describe` and move restore warnings/errors to object storage #173 #201 #202, @ncdc
|
||||
* Upgrade to client-go v5.0.1, kubernetes v1.8.2 #157, @ncdc
|
||||
* Add Travis CI support #165 #166, @ncdc
|
||||
|
||||
### Bug Fixes
|
||||
* Fix log location hook prefix stripping #222, @ncdc
|
||||
* When running `ark backup download`, remove file if there's an error #154, @ncdc
|
||||
* Update documentation for AWS KMS Key alias support #163, @lli-hiya
|
||||
* Remove clock from `volume_snapshot_action` #137, @athampy
|
||||
30
changelogs/CHANGELOG-0.7.md
Normal file
@@ -0,0 +1,30 @@
|
||||
- [v0.7.1](#v071)
|
||||
- [v0.7.0](#v070)
|
||||
|
||||
## v0.7.1
|
||||
#### 2018-02-22
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.7.1
|
||||
|
||||
### Bug Fixes:
|
||||
* Run the Ark server in its own namespace, separate from backups/schedules/restores/config (#322, @ncdc)
|
||||
|
||||
|
||||
## v0.7.0
|
||||
#### 2018-02-15
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.7.0
|
||||
|
||||
### New Features:
|
||||
* Run the Ark server in any namespace (#272, @ncdc)
|
||||
* Add ability to delete backups and their associated data (#252, @skriss)
|
||||
* Support both pre and post backup hooks (#243, @ncdc)
|
||||
|
||||
### Bug Fixes / Other Changes:
|
||||
* Switch from Update() to Patch() when updating Ark resources (#241, @skriss)
|
||||
* Don't fail the backup if a PVC is not bound to a PV (#256, @skriss)
|
||||
* Restore serviceaccounts prior to workload controllers (#258, @ncdc)
|
||||
* Stop removing annotations from PVs when restoring them (#263, @skriss)
|
||||
* Update GCP client libraries (#249, @skriss)
|
||||
* Clarify backup and restore creation messages (#270, @nrb)
|
||||
* Update S3 bucket creation docs for us-east-1 (#285, @lypht)
|
||||
100
changelogs/CHANGELOG-0.8.md
Normal file
@@ -0,0 +1,100 @@
|
||||
- [v0.8.3](#v083)
|
||||
- [v0.8.2](#v082)
|
||||
- [v0.8.1](#v081)
|
||||
- [v0.8.0](#v080)
|
||||
|
||||
## v0.8.3
|
||||
#### 2018-06-29
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.8.3
|
||||
|
||||
### Bug Fixes:
|
||||
* Don't restore backup and restore resources to avoid possible data corruption (#622, @ncdc)
|
||||
|
||||
|
||||
## v0.8.2
|
||||
#### 2018-06-01
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.8.2
|
||||
|
||||
### Bug Fixes:
|
||||
* Don't crash when a persistent volume claim is missing spec.volumeName (#520, @ncdc)
|
||||
|
||||
|
||||
## v0.8.1
|
||||
#### 2018-04-23
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.8.1
|
||||
|
||||
### Bug Fixes:
|
||||
* Azure: allow pre-v0.8.0 backups with disk snapshots to be restored and deleted (#446 #449, @skriss)
|
||||
|
||||
|
||||
## v0.8.0
|
||||
#### 2018-04-19
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.8.0
|
||||
|
||||
### Highlights:
|
||||
* Backup deletion has been completely revamped to make it simpler and less error-prone. As a user, you still use the `ark backup delete` command to request deletion of a backup and its associated cloud
|
||||
resources; behind the scenes, we've switched to using a new `DeleteBackupRequest` Custom Resource and associated controller for processing deletion requests.
|
||||
* We've reduced the number of required fields in the Ark config. For Azure, `location` is no longer required, and for GCP, `project` is not needed.
|
||||
* Ark now copies tags from volumes to snapshots during backup, and from snapshots to new volumes during restore.
|
||||
|
||||
### Breaking Changes:
|
||||
* Ark has moved back to a single namespace (`heptio-ark` by default) as part of #383.
|
||||
|
||||
### All New Features:
|
||||
* Add global `--kubecontext` flag to Ark CLI (#296, @blakebarnett)
|
||||
* Azure: support cross-resource group restores of volumes (#356 #378, @skriss)
|
||||
* AWS/Azure/GCP: copy tags from volumes to snapshots, and from snapshots to volumes (#341, @skriss)
|
||||
* Replace finalizer for backup deletion with `DeleteBackupRequest` custom resource & controller (#383 #431, @ncdc @nrb)
|
||||
* Don't log warnings during restore if an identical object already exists in the cluster (#405, @nrb)
|
||||
* Add bash & zsh completion support (#384, @containscafeine)
|
||||
|
||||
### Bug Fixes / Other Changes:
|
||||
* Error from the Ark CLI if attempting to restore a non-existent backup (#302, @ncdc)
|
||||
* Enable running the Ark server locally for development purposes (#334, @ncdc)
|
||||
* Add examples to `ark schedule create` documentation (#331, @lypht)
|
||||
* GCP: Remove `project` requirement from Ark config (#345, @skriss)
|
||||
* Add `--from-backup` flag to `ark restore create` and allow custom restore names (#342 #409, @skriss)
|
||||
* Azure: remove `location` requirement from Ark config (#344, @skriss)
|
||||
* Add documentation/examples for storing backups in IBM Cloud Object Storage (#321, @roytman)
|
||||
* Reduce verbosity of hooks logging (#362, @skriss)
|
||||
* AWS: Add minimal IAM policy to documentation (#363 #419, @hopkinsth)
|
||||
* Don't restore events (#374, @sanketjpatel)
|
||||
* Azure: reduce API polling interval from 60s to 5s (#359, @skriss)
|
||||
* Switch from hostPath to emptyDir volume type for minio example (#386, @containscafeine)
|
||||
* Add limit ranges as a prioritized resource for restores (#392, @containscafeine)
|
||||
* AWS: Add documentation on using Ark with kube2iam (#402, @domderen)
|
||||
* Azure: add node selector so Ark pod is scheduled on a linux node (#415, @ffd2subroutine)
|
||||
* Error from the Ark CLI if attempting to get logs for a non-existent restore (#391, @containscafeine)
|
||||
* GCP: Add minimal IAM policy to documentation (#429, @skriss @jody-frankowski)
|
||||
|
||||
### Upgrading from v0.7.1:
|
||||
Ark v0.7.1 moved the Ark server deployment into a separate namespace, `heptio-ark-server`. As of v0.8.0 we've
|
||||
returned to a single namespace, `heptio-ark`, for all Ark-related resources. If you're currently running v0.7.1,
|
||||
here are the steps you can take to upgrade:
|
||||
|
||||
1. Execute the steps from the **Credentials and configuration** section for your cloud:
|
||||
* [AWS](https://heptio.github.io/ark/v0.8.0/aws-config#credentials-and-configuration)
|
||||
* [Azure](https://heptio.github.io/ark/v0.8.0/azure-config#credentials-and-configuration)
|
||||
* [GCP](https://heptio.github.io/ark/v0.8.0/gcp-config#credentials-and-configuration)
|
||||
|
||||
When you get to the secret creation step, if you don't have your `credentials-ark` file handy,
|
||||
you can copy the existing secret from your `heptio-ark-server` namespace into the `heptio-ark` namespace:
|
||||
```bash
|
||||
kubectl get secret/cloud-credentials -n heptio-ark-server --export -o json | \
|
||||
jq '.metadata.namespace="heptio-ark"' | \
|
||||
kubectl apply -f -
|
||||
```
|
||||
|
||||
2. You can now safely delete the `heptio-ark-server` namespace:
|
||||
```bash
|
||||
kubectl delete namespace heptio-ark-server
|
||||
```
|
||||
|
||||
3. Execute the commands from the **Start the server** section for your cloud:
|
||||
* [AWS](https://heptio.github.io/ark/v0.8.0/aws-config#start-the-server)
|
||||
* [Azure](https://heptio.github.io/ark/v0.8.0/azure-config#start-the-server)
|
||||
* [GCP](https://heptio.github.io/ark/v0.8.0/gcp-config#start-the-server)
|
||||
181
changelogs/CHANGELOG-0.9.md
Normal file
@@ -0,0 +1,181 @@
|
||||
- [v0.9.11](#v0911)
|
||||
- [v0.9.10](#v0910)
|
||||
- [v0.9.9](#v099)
|
||||
- [v0.9.8](#v098)
|
||||
- [v0.9.7](#v097)
|
||||
- [v0.9.6](#v096)
|
||||
- [v0.9.5](#v095)
|
||||
- [v0.9.4](#v094)
|
||||
- [v0.9.3](#v093)
|
||||
- [v0.9.2](#v092)
|
||||
- [v0.9.1](#v091)
|
||||
- [v0.9.0](#v090)
|
||||
|
||||
## v0.9.11
|
||||
#### 2018-11-08
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.9.11
|
||||
|
||||
### Bug Fixes
|
||||
* Fix bug preventing PV snapshots from being restored (#1040, @ncdc)
|
||||
|
||||
|
||||
## v0.9.10
|
||||
#### 2018-11-01
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.9.10
|
||||
|
||||
### Bug Fixes
|
||||
* restore storageclasses before pvs and pvcs (#594, @shubheksha)
|
||||
* AWS: Ensure that the order returned by ListObjects is consistent (#999, @bashofmann)
|
||||
* Add CRDs to list of prioritized resources (#424, @domenicrosati)
|
||||
* Verify PV doesn't exist before creating new volume (#609, @nrb)
|
||||
* Update README.md - Grammar mistake corrected (#1018, @midhunbiju)
|
||||
|
||||
|
||||
## v0.9.9
|
||||
#### 2018-10-24
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.9.9
|
||||
|
||||
### Bug Fixes
|
||||
* Check if initContainers key exists before attempting to remove volume mounts. (#927, @skriss)
|
||||
|
||||
|
||||
## v0.9.8
|
||||
#### 2018-10-18
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.9.8
|
||||
|
||||
### Bug Fixes
|
||||
* Discard service account token volume mounts from init containers on restore (#910, @james-powis)
|
||||
* Support --include-cluster-resources flag when creating schedule (#942, @captjt)
|
||||
* Remove logic to get a GCP project (#926, @shubheksha)
|
||||
* Only try to back up a PVC's linked PV if the PVC's phase is Bound (#920, @skriss)
|
||||
* Claim ownership of new AWS volumes on Kubernetes cluster being restored into (#801, @ljakimczuk)
|
||||
* Remove timeout check when taking snapshots (#928, @carlisia)
|
||||
|
||||
|
||||
## v0.9.7
|
||||
#### 2018-10-04
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.9.7
|
||||
|
||||
### Bug Fixes
|
||||
* Preserve explicitly-specified node ports during restore (#712, @timoreimann)
|
||||
* Enable restoring resources with ownerReference set (#837, @mwieczorek)
|
||||
* Fix error when restoring ExternalName services (#869, @shubheksha)
|
||||
* remove restore log helper for accurate line numbers (#891, @skriss)
|
||||
* Display backup StartTimestamp in `ark backup get` output (#894, @marctc)
|
||||
* Fix restic restores when using namespace mappings (#900, @skriss)
|
||||
|
||||
|
||||
## v0.9.6
|
||||
#### 2018-09-21
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.9.6
|
||||
|
||||
### Bug Fixes
|
||||
* Discard service account tokens from non-default service accounts on restore (#843, @james-powis)
|
||||
* Update Docker images to use `alpine:3.8` (#852, @nrb)
|
||||
|
||||
|
||||
## v0.9.5
|
||||
#### 2018-09-17
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.9.5
|
||||
|
||||
### Bug Fixes
|
||||
* Fix issue causing restic restores not to work (#834, @skriss)
|
||||
|
||||
|
||||
## v0.9.4
|
||||
#### 2018-09-05
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.9.4
|
||||
|
||||
### Bug Fixes
|
||||
* Terminate plugin clients to resolve memory leaks (#797, @skriss)
|
||||
* Fix nil map errors when merging annotations (#812, @nrb)
|
||||
|
||||
|
||||
## v0.9.3
|
||||
#### 2018-08-10
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.9.3
|
||||
### Bug Fixes
|
||||
* Initialize Prometheus metrics when creating a new schedule (#689, @lemaral)
|
||||
|
||||
|
||||
## v0.9.2
|
||||
#### 2018-07-26
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.9.2
|
||||
|
||||
### Bug Fixes:
|
||||
* Fix issue where modifications made by backup item actions were not being saved to backup tarball (#704, @skriss)
|
||||
|
||||
|
||||
## v0.9.1
|
||||
#### 2018-07-23
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.9.1
|
||||
|
||||
### Bug Fixes:
|
||||
* Require namespace for Ark's CRDs to already exist at server startup (#676, @skriss)
|
||||
* Require all Ark CRDs to exist at server startup (#683, @skriss)
|
||||
* Fix `latest` tagging in Makefile (#690, @skriss)
|
||||
* Make Ark compatible with clusters that don't have the `rbac.authorization.k8s.io/v1` API group (#682, @nrb)
|
||||
* Don't consider missing snapshots an error during backup deletion, limit backup deletion requests per backup to 1 (#687, @skriss)
|
||||
|
||||
|
||||
## v0.9.0
|
||||
#### 2018-07-06
|
||||
### Download
|
||||
- https://github.com/heptio/ark/releases/tag/v0.9.0
|
||||
|
||||
### Highlights:
|
||||
* Ark now has support for backing up and restoring Kubernetes volumes using a free open-source backup tool called [restic](https://github.com/restic/restic).
|
||||
This provides users an out-of-the-box solution for backing up and restoring almost any type of Kubernetes volume, whether or not it has snapshot support
|
||||
integrated with Ark. For more information, see the [documentation](https://github.com/heptio/ark/blob/master/docs/restic.md).
|
||||
* Support for Prometheus metrics has been added! View total number of backup attempts (including success or failure), total backup size in bytes, and backup
|
||||
durations. More metrics coming in future releases!
|
||||
|
||||
### All New Features:
|
||||
* Add restic support (#508 #532 #533 #534 #535 #537 #540 #541 #545 #546 #547 #548 #555 #557 #561 #563 #569 #570 #571 #606 #608 #610 #621 #631 #636, @skriss)
|
||||
* Add prometheus metrics (#531 #551 #564, @ashish-amarnath @nrb)
|
||||
* When backing up a service account, include cluster roles/cluster role bindings that reference it (#470, @skriss)
|
||||
* When restoring service accounts, copy secrets/image pull secrets into the target cluster even if the service account already exists (#403, @nrb)
|
||||
|
||||
### Bug Fixes / Other Changes:
|
||||
* Upgrade to Kubernetes 1.10 dependencies (#417, @skriss)
|
||||
* Upgrade to go 1.10 and alpine 3.7 (#456, @skriss)
|
||||
* Display no excluded resources/namespaces as `<none>` rather than `*` (#453, @nrb)
|
||||
* Skip completed jobs and pods when restoring (#463, @nrb)
|
||||
* Set namespace correctly when syncing backups from object storage (#472, @skriss)
|
||||
* When building on macOS, bind-mount volumes with delegated config (#478, @skriss)
|
||||
* Add replica sets and daemonsets to cohabitating resources so they're not backed up twice (#482 #485, @skriss)
|
||||
* Shut down the Ark server gracefully on SIGINT/SIGTERM (#483, @skriss)
|
||||
* Only back up resources that support GET and DELETE in addition to LIST and CREATE (#486, @nrb)
|
||||
* Show a better error message when trying to get an incomplete restore's logs (#496, @nrb)
|
||||
* Stop processing when setting a backup deletion request's phase to `Deleting` fails (#500, @nrb)
|
||||
* Add library code to install Ark's server components (#437 #506, @marpaia)
|
||||
* Properly handle errors when backing up additional items (#512, @carlpett)
|
||||
* Run post hooks even if backup actions fail (#514, @carlpett)
|
||||
* GCP: fail backup if upload to object storage fails (#510, @nrb)
|
||||
* AWS: don't require `region` as part of backup storage provider config (#455, @skriss)
|
||||
* Ignore terminating resources while doing a backup (#526, @yastij)
|
||||
* Log to stdout instead of stderr (#553, @ncdc)
|
||||
* Move sample minio deployment's config to an emptyDir (#566, @runyontr)
|
||||
* Add `omitempty` tag to optional API fields (#580, @nikhita)
|
||||
* Don't restore PVs with a reclaim policy of `Delete` and no snapshot (#613, @ncdc)
|
||||
* Don't restore mirror pods (#619, @ncdc)
|
||||
|
||||
### Docs Contributors:
|
||||
* @gianrubio
|
||||
* @castrojo
|
||||
* @dhananjaysathe
|
||||
* @c-knowles
|
||||
* @mattkelly
|
||||
* @ae-v
|
||||
* @hamidzr
|
||||
0
changelogs/unreleased/.keep
Normal file
77
cmd/ark-restic-restore-helper/main.go
Normal file
@@ -0,0 +1,77 @@
|
||||
/*
|
||||
Copyright 2018 the Heptio Ark contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if len(os.Args) != 2 {
|
||||
fmt.Fprintln(os.Stderr, "ERROR: exactly one argument must be provided, the restore's UID")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if done() {
|
||||
fmt.Println("All restic restores are done")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// done returns true if for each directory under /restores, a file exists
|
||||
// within the .ark/ subdirectory whose name is equal to os.Args[1], or
|
||||
// false otherwise
|
||||
func done() bool {
|
||||
children, err := ioutil.ReadDir("/restores")
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR reading /restores directory: %s\n", err)
|
||||
return false
|
||||
}
|
||||
|
||||
for _, child := range children {
|
||||
if !child.IsDir() {
|
||||
fmt.Printf("%s is not a directory, skipping.\n", child.Name())
|
||||
continue
|
||||
}
|
||||
|
||||
doneFile := filepath.Join("/restores", child.Name(), ".ark", os.Args[1])
|
||||
|
||||
if _, err := os.Stat(doneFile); os.IsNotExist(err) {
|
||||
fmt.Printf("Not found: %s\n", doneFile)
|
||||
return false
|
||||
} else if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "ERROR looking for %s: %s\n", doneFile, err)
|
||||
return false
|
||||
}
|
||||
|
||||
fmt.Printf("Found %s", doneFile)
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
Copyright 2017 Heptio Inc.
|
||||
Copyright 2017 the Heptio Ark contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
@@ -1,31 +0,0 @@
|
||||
# Table of Contents
|
||||
|
||||
## User Guide
|
||||
|
||||
* [Concepts][1]
|
||||
* [Build from scratch][0]
|
||||
* [Cloud provider specifics][9]
|
||||
* [Debugging restores][4]
|
||||
* [FAQ][10]
|
||||
|
||||
## Reference
|
||||
* [CLI reference][2]
|
||||
* [Config definition][5]
|
||||
* [Output file format][6]
|
||||
* [Sample YAML files][3]
|
||||
|
||||
## Scenarios
|
||||
* [Disaster recovery][7]
|
||||
* [Cluster migration][8]
|
||||
|
||||
[0]: build-from-scratch.md
|
||||
[1]: concepts.md
|
||||
[2]: cli-reference
|
||||
[3]: /examples
|
||||
[4]: debugging-restores.md
|
||||
[5]: config-definition.md
|
||||
[6]: output-file-format.md
|
||||
[7]: use-cases.md#disaster-recovery
|
||||
[8]: use-cases.md#cluster-migration
|
||||
[9]: cloud-provider-specifics.md
|
||||
[10]: faq.md
|
||||
80
docs/about.md
Normal file
@@ -0,0 +1,80 @@
|
||||
# How Ark Works
|
||||
|
||||
Each Ark operation -- on-demand backup, scheduled backup, restore -- is a custom resource, defined with a Kubernetes [Custom Resource Definition (CRD)][20] and stored in [etcd][22]. Ark also includes controllers that process the custom resources to perform backups, restores, and all related operations.
|
||||
|
||||
You can back up or restore all objects in your cluster, or you can filter objects by type, namespace, and/or label.
|
||||
|
||||
Ark is ideal for the disaster recovery use case, as well as for snapshotting your application state, prior to performing system operations on your cluster (e.g. upgrades).
|
||||
|
||||
## On-demand backups
|
||||
|
||||
The **backup** operation:
|
||||
|
||||
1. Uploads a tarball of copied Kubernetes objects into cloud object storage.
|
||||
|
||||
1. Calls the cloud provider API to make disk snapshots of persistent volumes, if specified.
|
||||
|
||||
You can optionally specify hooks to be executed during the backup. For example, you might
|
||||
need to tell a database to flush its in-memory buffers to disk before taking a snapshot. [More about hooks][10].
|
||||
|
||||
Note that cluster backups are not strictly atomic. If Kubernetes objects are being created or edited at the time of backup, they might not be included in the backup. The odds of capturing inconsistent information are low, but it is possible.
|
||||
|
||||
## Scheduled backups
|
||||
|
||||
The **schedule** operation allows you to back up your data at recurring intervals. The first backup is performed when the schedule is first created, and subsequent backups happen at the schedule's specified interval. These intervals are specified by a Cron expression.
|
||||
|
||||
Scheduled backups are saved with the name `<SCHEDULE NAME>-<TIMESTAMP>`, where `<TIMESTAMP>` is formatted as *YYYYMMDDhhmmss*.
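A minimal example, assuming the `--schedule` flag of `ark schedule create` accepts a standard cron expression (the schedule name and the 1 AM expression below are illustrative):

```bash
# Run a backup every day at 1:00 AM.
ark schedule create daily-backup --schedule="0 1 * * *"

# The resulting backups are named daily-backup-<TIMESTAMP>, e.g. daily-backup-20181108010000.
ark backup get
```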
|
||||
|
||||
## Restores
|
||||
|
||||
The **restore** operation allows you to restore all of the objects and persistent volumes from a previously created backup. You can also restore only a filtered subset of objects and persistent volumes. Ark supports multiple namespace remapping--for example, in a single restore, objects in namespace "abc" can be recreated under namespace "def", and the objects in namespace "123" under "456".
|
||||
|
||||
The default name of a restore is `<BACKUP NAME>-<TIMESTAMP>`, where `<TIMESTAMP>` is formatted as *YYYYMMDDhhmmss*. You can also specify a custom name. A restored object also includes a label with key `ark.heptio.com/restore-name` and value `<RESTORE NAME>`.
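A short sketch of creating restores from an existing backup; `--from-backup` comes from the CLI changes noted in the changelog, while the exact namespace-mapping flag name is an assumption based on the remapping behavior described above:

```bash
# Restore everything from the backup named test-backup; the restore will be
# named test-backup-<TIMESTAMP> unless a custom name is given.
ark restore create --from-backup test-backup

# Restore with namespace remapping (flag name assumed; check `ark restore create --help`).
ark restore create --from-backup test-backup --namespace-mappings abc:def,123:456
```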
|
||||
|
||||
You can also run the Ark server in restore-only mode, which disables backup, schedule, and garbage collection functionality during disaster recovery.
|
||||
|
||||
## Backup workflow
|
||||
|
||||
When you run `ark backup create test-backup`:
|
||||
|
||||
1. The Ark client makes a call to the Kubernetes API server to create a `Backup` object.
|
||||
|
||||
1. The `BackupController` notices the new `Backup` object and performs validation.
|
||||
|
||||
1. The `BackupController` begins the backup process. It collects the data to back up by querying the API server for resources.
|
||||
|
||||
1. The `BackupController` makes a call to the object storage service -- for example, AWS S3 -- to upload the backup file.
|
||||
|
||||
By default, `ark backup create` makes disk snapshots of any persistent volumes. You can adjust the snapshots by specifying additional flags. Run `ark backup create --help` to see available flags. Snapshots can be disabled with the option `--snapshot-volumes=false`.
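For example, using the backup name from the workflow above:

```bash
# Back up the cluster without taking PV disk snapshots.
ark backup create test-backup --snapshot-volumes=false

# List all flags that adjust filtering and snapshot behavior.
ark backup create --help
```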
|
||||
|
||||
![19]
|
||||
|
||||
## Backed-up API versions
|
||||
|
||||
Ark backs up resources using the Kubernetes API server's *preferred version* for each group/resource. When restoring a resource, this same API group/version must exist in the target cluster in order for the restore to be successful.
|
||||
|
||||
For example, if the cluster being backed up has a `gizmos` resource in the `things` API group, with group/versions `things/v1alpha1`, `things/v1beta1`, and `things/v1`, and the server's preferred group/version is `things/v1`, then all `gizmos` will be backed up from the `things/v1` API endpoint. When backups from this cluster are restored, the target cluster **must** have the `things/v1` endpoint in order for `gizmos` to be restored. Note that `things/v1` **does not** need to be the preferred version in the target cluster; it just needs to exist.
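Before restoring into a new cluster, you can confirm that the required group/versions are served there with standard kubectl discovery (the `things` group below is the hypothetical example from above):

```bash
# List every group/version served by the target cluster's API server.
kubectl api-versions

# Show the versions (and preferred version) served for a specific group.
kubectl get --raw /apis/things
```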
|
||||
|
||||
## Set a backup to expire
|
||||
|
||||
When you create a backup, you can specify a TTL by adding the flag `--ttl <DURATION>`. If Ark sees that an existing backup resource is expired, it removes:
|
||||
|
||||
* The backup resource
|
||||
* The backup file from cloud object storage
|
||||
* All PersistentVolume snapshots
|
||||
* All associated Restores
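A minimal example of setting a TTL at backup creation time (the backup name and 72-hour value are illustrative):

```bash
# This backup becomes eligible for garbage collection after 72 hours.
ark backup create nginx-backup --ttl 72h0m0s
```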
|
||||
|
||||
## Object storage sync
|
||||
|
||||
Heptio Ark treats object storage as the source of truth. It continuously checks to see that the correct backup resources are always present. If there is a properly formatted backup file in the storage bucket, but no corresponding backup resource in the Kubernetes API, Ark synchronizes the information from object storage to Kubernetes.
|
||||
|
||||
This allows restore functionality to work in a cluster migration scenario, where the original backup objects do not exist in the new cluster.
|
||||
|
||||
Likewise, if a backup object exists in Kubernetes but not in object storage, it will be deleted from Kubernetes since the backup tarball no longer exists.
|
||||
|
||||
[10]: hooks.md
|
||||
[19]: /docs/img/backup-process.png
|
||||
[20]: https://kubernetes.io/docs/concepts/api-extension/custom-resources/#customresourcedefinitions
|
||||
[21]: https://kubernetes.io/docs/concepts/api-extension/custom-resources/#custom-controllers
|
||||
[22]: https://github.com/coreos/etcd
|
||||
|
||||
@@ -2,6 +2,9 @@
|
||||
|
||||
## API types
|
||||
|
||||
Here we list the API types that have functionality you can configure only via JSON/YAML, rather than through the `ark` CLI (for example, hooks).
|
||||
|
||||
* [Backup][1]
|
||||
|
||||
[1]: backup.md
|
||||
|
||||
@@ -22,7 +22,7 @@ kind: Backup
|
||||
metadata:
|
||||
# Backup name. May be any valid Kubernetes object name. Required.
|
||||
name: a
|
||||
# Backup namespace. Must be heptio-ark. Required.
|
||||
# Backup namespace. Required. In version 0.7.0 and later, can be any string. Must be the namespace of the Ark server.
|
||||
namespace: heptio-ark
|
||||
# Parameters about the backup. Required.
|
||||
spec:
|
||||
@@ -60,6 +60,12 @@ spec:
|
||||
# AWS. Valid values are true, false, and null/unset. If unset, Ark performs snapshots as long as
|
||||
# a persistent volume provider is configured for Ark.
|
||||
snapshotVolumes: null
|
||||
# Where to store the tarball and logs.
|
||||
storageLocation: aws-primary
|
||||
# The list of locations in which to store volume snapshots created for this backup.
|
||||
volumeSnapshotLocations:
|
||||
- aws-primary
|
||||
- gcp-primary
|
||||
# The amount of time before this backup is eligible for garbage collection.
|
||||
ttl: 24h0m0s
|
||||
# Actions to perform at different times during a backup. The only hook currently supported is
|
||||
@@ -88,8 +94,12 @@ spec:
|
||||
matchLabels:
|
||||
app: ark
|
||||
component: server
|
||||
# An array of hooks to run. Currently only "exec" hooks are supported.
|
||||
# An array of hooks to run before executing custom actions. Currently only "exec" hooks are supported.
|
||||
# DEPRECATED. Use pre instead.
|
||||
hooks:
|
||||
# Same content as pre below.
|
||||
# An array of hooks to run before executing custom actions. Currently only "exec" hooks are supported.
|
||||
pre:
|
||||
-
|
||||
# The type of hook. This must be "exec".
|
||||
exec:
|
||||
@@ -105,6 +115,10 @@ spec:
|
||||
onError: Fail
|
||||
# How long to wait for the command to finish executing. Defaults to 30 seconds. Optional.
|
||||
timeout: 10s
|
||||
# An array of hooks to run after all custom actions and additional items have been
|
||||
# processed. Currently only "exec" hooks are supported.
|
||||
post:
|
||||
# Same content as pre above.
|
||||
# Status about the Backup. Users should not set any data here.
|
||||
status:
|
||||
# The date and time when the Backup is eligible for garbage collection.
|
||||
|
||||
73
docs/api-types/backupstoragelocation.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# Ark Backup Storage Locations
|
||||
|
||||
## Backup Storage Location
|
||||
|
||||
Ark can store backups in a number of locations. These are represented in the cluster via the `BackupStorageLocation` CRD.
|
||||
|
||||
Ark must have at least one `BackupStorageLocation`. By default, this is expected to be named `default`; however, the name can be changed by specifying `--default-backup-storage-location` on `ark server`. Backups that do not explicitly specify a storage location are saved to this `BackupStorageLocation`.
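As a sketch, a backup can target a specific, non-default location with the `--storage-location` flag added to the create commands in v0.10; the location name `aws-primary` matches the example used in the Backup API documentation:

```bash
# Store this backup in the BackupStorageLocation named "aws-primary" instead of the default.
ark backup create test-backup --storage-location aws-primary
```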
|
||||
|
||||
> *NOTE*: `BackupStorageLocation` takes the place of the `Config.backupStorageProvider` key as of v0.10.0
|
||||
|
||||
A sample YAML `BackupStorageLocation` looks like the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: ark.heptio.com/v1
|
||||
kind: BackupStorageLocation
|
||||
metadata:
|
||||
name: default
|
||||
namespace: heptio-ark
|
||||
spec:
|
||||
provider: aws
|
||||
objectStorage:
|
||||
bucket: myBucket
|
||||
config:
|
||||
region: us-west-2
|
||||
```
|
||||
|
||||
### Parameter Reference
|
||||
|
||||
The configurable parameters are as follows:
|
||||
|
||||
#### Main config parameters
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `provider` | String (Ark natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.)| Required Field | The name for whichever cloud provider will be used to actually store the backups. |
|
||||
| `objectStorage` | ObjectStorageLocation | Required Field | Specification of the object storage for the given provider. |
|
||||
| `objectStorage/bucket` | String | Required Field | The storage bucket where backups are to be uploaded. |
|
||||
| `objectStorage/prefix` | String | Optional Field | The directory inside a storage bucket where backups are to be uploaded. |
|
||||
| `config` | map[string]string<br><br>(See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs or your provider's documentation.) | None (Optional) | Configuration keys/values to be passed to the cloud provider for backup storage. |
|
||||
|
||||
#### AWS
|
||||
|
||||
**(Or other S3-compatible storage)**
|
||||
|
||||
##### config
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `region` | string | Empty | *Example*: "us-east-1"<br><br>See [AWS documentation][3] for the full list.<br><br>Queried from the AWS S3 API if not provided. |
|
||||
| `s3ForcePathStyle` | bool | `false` | Set this to `true` if you are using a local storage service like Minio. |
|
||||
| `s3Url` | string | Required field for non-AWS-hosted storage| *Example*: http://minio:9000<br><br>You can specify the AWS S3 URL here for explicitness, but Ark can already generate it from `region`, and `bucket`. This field is primarily for local storage services like Minio.|
|
||||
| `publicUrl` | string | Empty | *Example*: https://minio.mycluster.com<br><br>If specified, use this instead of `s3Url` when generating download URLs (e.g., for logs). This field is primarily for local storage services like Minio.|
|
||||
| `kmsKeyId` | string | Empty | *Example*: "502b409c-4da1-419f-a16e-eif453b3i49f" or "alias/`<KMS-Key-Alias-Name>`"<br><br>Specify an [AWS KMS key][10] id or alias to enable encryption of the backups stored in S3. Only works with AWS S3 and may require explicitly granting key usage rights.|
|
||||
| `signatureVersion` | string | `"4"` | Version of the signature algorithm used to create signed URLs that are used by ark cli to download backups or fetch logs. Possible versions are "1" and "4". Usually the default version 4 is correct, but some S3-compatible providers like Quobyte only support version 1.|
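Putting several of these keys together, the following sketch creates an additional location backed by a local Minio service; the location name, bucket, and URL are illustrative, and the `region` value is a placeholder since Minio does not enforce AWS regions:

```bash
kubectl apply -f - <<EOF
apiVersion: ark.heptio.com/v1
kind: BackupStorageLocation
metadata:
  name: minio-backups
  namespace: heptio-ark
spec:
  provider: aws
  objectStorage:
    bucket: ark-backups
  config:
    region: minio            # placeholder; Minio does not validate AWS regions
    s3ForcePathStyle: "true" # required for path-style services like Minio
    s3Url: http://minio:9000
EOF
```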
|
||||
|
||||
#### Azure
|
||||
|
||||
##### config
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `resourceGroup` | string | Required Field | Name of the resource group containing the storage account for this backup storage location. |
|
||||
| `storageAccount` | string | Required Field | Name of the storage account for this backup storage location. |
|
||||
|
||||
#### GCP
|
||||
|
||||
No parameters required.
|
||||
|
||||
[0]: #aws
|
||||
[1]: #gcp
|
||||
[2]: #azure
|
||||
[3]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions
|
||||
[10]: http://docs.aws.amazon.com/kms/latest/developerguide/overview.html
|
||||
60
docs/api-types/volumesnapshotlocation.md
Normal file
@@ -0,0 +1,60 @@
|
||||
# Ark Volume Snapshot Location
|
||||
|
||||
## Volume Snapshot Location
|
||||
|
||||
A volume snapshot location is the location in which to store the volume snapshots created for a backup.
|
||||
|
||||
Ark can be configured to take snapshots of volumes from multiple providers. Ark also allows you to configure multiple `VolumeSnapshotLocation` objects per provider, although you can select only one location per provider at backup time.
|
||||
|
||||
Each VolumeSnapshotLocation describes a provider + location. These are represented in the cluster via the `VolumeSnapshotLocation` CRD. Ark must have at least one `VolumeSnapshotLocation` per cloud provider.
|
||||
|
||||
A sample YAML `VolumeSnapshotLocation` looks like the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: ark.heptio.com/v1
|
||||
kind: VolumeSnapshotLocation
|
||||
metadata:
|
||||
name: aws-default
|
||||
namespace: heptio-ark
|
||||
spec:
|
||||
provider: aws
|
||||
config:
|
||||
region: us-west-2
|
||||
```
|
||||
|
||||
### Parameter Reference
|
||||
|
||||
The configurable parameters are as follows:
|
||||
|
||||
#### Main config parameters
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `provider` | String (Ark natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.)| Required Field | The name for whichever cloud provider will be used to actually store the volume. |
|
||||
| `config` | map[string]string<br><br>(See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs or your provider's documentation.) | None (Optional) | Configuration keys/values to be passed to the cloud provider for volume snapshots. |
|
||||
|
||||
#### AWS
|
||||
|
||||
##### config
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `region` | string | Empty | *Example*: "us-east-1"<br><br>See [AWS documentation][3] for the full list.<br><br>Queried from the AWS S3 API if not provided. |
|
||||
|
||||
#### Azure
|
||||
|
||||
##### config
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `apiTimeout` | metav1.Duration | 2m0s | How long to wait for an Azure API request to complete before timeout. |
|
||||
| `resourceGroup` | string | Optional | The name of the resource group where volume snapshots should be stored, if different from the cluster's resource group. |
|
||||
|
||||
#### GCP
|
||||
|
||||
No parameters required.
|
||||
|
||||
[0]: #aws
|
||||
[1]: #gcp
|
||||
[2]: #azure
|
||||
[3]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions
|
||||
311
docs/aws-config.md
Normal file
@@ -0,0 +1,311 @@
|
||||
# Run Ark on AWS
|
||||
|
||||
To set up Ark on AWS, you:
|
||||
|
||||
* Create your S3 bucket
|
||||
* Create an AWS IAM user for Ark
|
||||
* Configure the server
|
||||
* Create a Secret for your credentials
|
||||
|
||||
If you do not have the `aws` CLI locally installed, follow the [user guide][5] to set it up.
|
||||
|
||||
## Create S3 bucket
|
||||
|
||||
Heptio Ark requires an object storage bucket to store backups in, preferably unique to a single Kubernetes cluster (see the [FAQ][20] for more details). Create an S3 bucket, replacing placeholders appropriately:
|
||||
|
||||
```bash
|
||||
aws s3api create-bucket \
|
||||
--bucket <YOUR_BUCKET> \
|
||||
--region <YOUR_REGION> \
|
||||
--create-bucket-configuration LocationConstraint=<YOUR_REGION>
|
||||
```
|
||||
NOTE: us-east-1 does not support a `LocationConstraint`. If your region is `us-east-1`, omit the bucket configuration:
|
||||
|
||||
```bash
|
||||
aws s3api create-bucket \
|
||||
--bucket <YOUR_BUCKET> \
|
||||
--region us-east-1
|
||||
```
|
||||
|
||||
## Create IAM user
|
||||
|
||||
For more information, see [the AWS documentation on IAM users][14].
|
||||
|
||||
1. Create the IAM user:
|
||||
|
||||
```bash
|
||||
aws iam create-user --user-name heptio-ark
|
||||
```
|
||||
|
||||
> If you'll be using Ark to backup multiple clusters with multiple S3 buckets, it may be desirable to create a unique username per cluster rather than the default `heptio-ark`.
|
||||
|
||||
2. Attach policies to give `heptio-ark` the necessary permissions:
|
||||
|
||||
```bash
|
||||
BUCKET=<YOUR_BUCKET>
|
||||
cat > heptio-ark-policy.json <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"ec2:DescribeVolumes",
|
||||
"ec2:DescribeSnapshots",
|
||||
"ec2:CreateTags",
|
||||
"ec2:CreateVolume",
|
||||
"ec2:CreateSnapshot",
|
||||
"ec2:DeleteSnapshot"
|
||||
],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"s3:GetObject",
|
||||
"s3:DeleteObject",
|
||||
"s3:PutObject",
|
||||
"s3:AbortMultipartUpload",
|
||||
"s3:ListMultipartUploadParts"
|
||||
],
|
||||
"Resource": [
|
||||
"arn:aws:s3:::${BUCKET}/*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"s3:ListBucket"
|
||||
],
|
||||
"Resource": [
|
||||
"arn:aws:s3:::${BUCKET}"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
aws iam put-user-policy \
|
||||
--user-name heptio-ark \
|
||||
--policy-name heptio-ark \
|
||||
--policy-document file://heptio-ark-policy.json
|
||||
```
|
||||
|
||||
3. Create an access key for the user:
|
||||
|
||||
```bash
|
||||
aws iam create-access-key --user-name heptio-ark
|
||||
```
|
||||
|
||||
The result should look like:
|
||||
|
||||
```json
|
||||
{
|
||||
"AccessKey": {
|
||||
"UserName": "heptio-ark",
|
||||
"Status": "Active",
|
||||
"CreateDate": "2017-07-31T22:24:41.576Z",
|
||||
"SecretAccessKey": <AWS_SECRET_ACCESS_KEY>,
|
||||
"AccessKeyId": <AWS_ACCESS_KEY_ID>
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
4. Create an Ark-specific credentials file (`credentials-ark`) in your local directory:
|
||||
|
||||
```
|
||||
[default]
|
||||
aws_access_key_id=<AWS_ACCESS_KEY_ID>
|
||||
aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
|
||||
```
|
||||
|
||||
where the access key id and secret are the values returned from the `create-access-key` request.
|
||||
|
||||
## Credentials and configuration
|
||||
|
||||
In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0].
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
```
|
||||
|
||||
Create a Secret. In the directory of the credentials file you just created, run:
|
||||
|
||||
```bash
|
||||
kubectl create secret generic cloud-credentials \
|
||||
--namespace <ARK_NAMESPACE> \
|
||||
--from-file cloud=credentials-ark
|
||||
```
|
||||
|
||||
Specify the following values in the example files:
|
||||
|
||||
* In `config/aws/05-ark-backupstoragelocation.yaml`:
|
||||
|
||||
* Replace `<YOUR_BUCKET>` and `<YOUR_REGION>` (for S3 backup storage, region is optional and will be queried from the AWS S3 API if not provided). See the [BackupStorageLocation definition][21] for details.
|
||||
|
||||
* In `config/aws/06-ark-volumesnapshotlocation.yaml`:
|
||||
|
||||
* Replace `<YOUR_REGION>`. See the [VolumeSnapshotLocation definition][6] for details.
|
||||
|
||||
* (Optional, use only to specify multiple volume snapshot locations) In `config/aws/10-deployment.yaml` (or `config/aws/10-deployment-kube2iam.yaml`, as appropriate):
|
||||
|
||||
* Uncomment the `--default-volume-snapshot-locations` and replace provider locations with the values for your environment.
|
||||
|
||||
* (Optional) If you run the nginx example, in file `config/nginx-app/with-pv.yaml`:
|
||||
|
||||
* Replace `<YOUR_STORAGE_CLASS_NAME>` with `gp2`. This is AWS's default `StorageClass` name.
|
||||
|
||||
* (Optional) If you have multiple clusters and you want to support migration of resources between them, in file `config/aws/10-deployment.yaml`:
|
||||
|
||||
* Uncomment the environment variable `AWS_CLUSTER_NAME` and replace `<YOUR_CLUSTER_NAME>` with the current cluster's name. When restoring a backup, this lets Ark (and the cluster it is running on) claim ownership of AWS volumes created from snapshots taken on a different cluster.
The best way to get the current cluster's name is to check it with the deployment tool you used, or to read it directly from the EC2 instance tags.
|
||||
|
||||
The following listing shows how to get the cluster nodes' EC2 tags. First, get the nodes' external IDs (EC2 instance IDs):
|
||||
|
||||
```bash
|
||||
kubectl get nodes -o jsonpath='{.items[*].spec.externalID}'
|
||||
```
|
||||
|
||||
Copy one of the returned IDs `<ID>` and use it with the `aws` CLI tool to search for one of the following:
|
||||
|
||||
* The `kubernetes.io/cluster/<AWS_CLUSTER_NAME>` tag of the value `owned`. The `<AWS_CLUSTER_NAME>` is then your cluster's name:
|
||||
|
||||
```bash
|
||||
aws ec2 describe-tags --filters "Name=resource-id,Values=<ID>" "Name=value,Values=owned"
|
||||
```
|
||||
|
||||
* If the first output returns nothing, then check for the legacy Tag `KubernetesCluster` of the value `<AWS_CLUSTER_NAME>`:
|
||||
|
||||
```bash
|
||||
aws ec2 describe-tags --filters "Name=resource-id,Values=<ID>" "Name=key,Values=KubernetesCluster"
|
||||
```
|
||||
|
||||
## Start the server
|
||||
|
||||
In the root of your Ark directory, run:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/aws/05-ark-backupstoragelocation.yaml
|
||||
kubectl apply -f config/aws/06-ark-volumesnapshotlocation.yaml
|
||||
kubectl apply -f config/aws/10-deployment.yaml
|
||||
```
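To verify that the server came up (assuming the default `heptio-ark` namespace and the deployment name `ark` used in the sample manifests):

```bash
# The Ark server pod should reach the Running state.
kubectl get pods --namespace heptio-ark

# Follow the server logs while it validates its backup storage location.
kubectl logs deployment/ark --namespace heptio-ark
```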
|
||||
|
||||
## ALTERNATIVE: Setup permissions using kube2iam
|
||||
|
||||
[Kube2iam](https://github.com/jtblin/kube2iam) is a Kubernetes application that manages AWS IAM permissions for pods via annotations rather than by distributing API keys.
|
||||
|
||||
> This path assumes you have `kube2iam` already running in your Kubernetes cluster. If that is not the case, please install it first, following the docs here: [https://github.com/jtblin/kube2iam](https://github.com/jtblin/kube2iam)
|
||||
|
||||
To set it up for Ark, create a role with the required permissions, then add the permissions annotation to the Ark deployment to define which role it should assume.
|
||||
|
||||
1. Create a trust policy document that allows the role to be used for EC2 management and to be assumed by the kube2iam role:
|
||||
|
||||
```bash
|
||||
cat > heptio-ark-trust-policy.json <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"Service": "ec2.amazonaws.com"
|
||||
},
|
||||
"Action": "sts:AssumeRole"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"AWS": "arn:aws:iam::<AWS_ACCOUNT_ID>:role/<ROLE_CREATED_WHEN_INITIALIZING_KUBE2IAM>"
|
||||
},
|
||||
"Action": "sts:AssumeRole"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
```
|
||||
|
||||
2. Create the IAM role:
|
||||
|
||||
```bash
|
||||
aws iam create-role --role-name heptio-ark --assume-role-policy-document file://./heptio-ark-trust-policy.json
|
||||
```
|
||||
|
||||
3. Attach policies to give `heptio-ark` the necessary permissions:
|
||||
|
||||
```bash
|
||||
BUCKET=<YOUR_BUCKET>
|
||||
cat > heptio-ark-policy.json <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"ec2:DescribeVolumes",
|
||||
"ec2:DescribeSnapshots",
|
||||
"ec2:CreateTags",
|
||||
"ec2:CreateVolume",
|
||||
"ec2:CreateSnapshot",
|
||||
"ec2:DeleteSnapshot"
|
||||
],
|
||||
"Resource": "*"
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"s3:GetObject",
|
||||
"s3:DeleteObject",
|
||||
"s3:PutObject",
|
||||
"s3:AbortMultipartUpload",
|
||||
"s3:ListMultipartUploadParts"
|
||||
],
|
||||
"Resource": [
|
||||
"arn:aws:s3:::${BUCKET}/*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"s3:ListBucket"
|
||||
],
|
||||
"Resource": [
|
||||
"arn:aws:s3:::${BUCKET}"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
aws iam put-role-policy \
|
||||
--role-name heptio-ark \
|
||||
--policy-name heptio-ark-policy \
|
||||
--policy-document file://./heptio-ark-policy.json
|
||||
```
|
||||
4. Update `AWS_ACCOUNT_ID` & `HEPTIO_ARK_ROLE_NAME` in the file `config/aws/10-deployment-kube2iam.yaml`:
|
||||
|
||||
```
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
namespace: heptio-ark
|
||||
name: ark
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
component: ark
|
||||
annotations:
|
||||
iam.amazonaws.com/role: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<HEPTIO_ARK_ROLE_NAME>
|
||||
...
|
||||
```
|
||||
|
||||
5. Run Ark deployment using the file `config/aws/10-deployment-kube2iam.yaml`.
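For example:

```bash
kubectl apply -f config/aws/10-deployment-kube2iam.yaml
```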
|
||||
|
||||
[0]: namespace.md
|
||||
[5]: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html
|
||||
[6]: api-types/volumesnapshotlocation.md#aws
|
||||
[14]: http://docs.aws.amazon.com/IAM/latest/UserGuide/introduction.html
|
||||
[20]: faq.md
|
||||
[21]: api-types/backupstoragelocation.md#aws
|
||||
155
docs/azure-config.md
Normal file
@@ -0,0 +1,155 @@
|
||||
# Run Ark on Azure
|
||||
|
||||
To configure Ark on Azure, you:
|
||||
|
||||
* Create your Azure storage account and blob container
|
||||
* Create Azure service principal for Ark
|
||||
* Configure the server
|
||||
* Create a Secret for your credentials
|
||||
|
||||
If you do not have the `az` Azure CLI 2.0 installed locally, follow the [install guide][18] to set it up.
|
||||
|
||||
Run:
|
||||
|
||||
```bash
|
||||
az login
|
||||
```
|
||||
|
||||
## Kubernetes cluster prerequisites
|
||||
|
||||
Ensure that the VMs for your agent pool allow Managed Disks. If I/O performance is critical,
|
||||
consider using Premium Managed Disks, which are SSD backed.
|
||||
|
||||
## Create Azure storage account and blob container
|
||||
|
||||
Heptio Ark requires a storage account and blob container in which to store backups.
|
||||
|
||||
The storage account can be created in the same Resource Group as your Kubernetes cluster or
|
||||
separated into its own Resource Group. The example below shows the storage account created in a
|
||||
separate `Ark_Backups` Resource Group.
|
||||
|
||||
The storage account needs to be created with a globally unique ID, since this is used for DNS. In
the sample script below, we generate a random name using `uuidgen`, but you can choose the name
however you'd like, following the [Azure naming rules for storage accounts][19]. The
storage account is created with encryption at rest capabilities (Microsoft managed keys) and is
configured to only allow access via HTTPS.
|
||||
|
||||
```bash
|
||||
# Create a resource group for the backups storage account. Change the location as needed.
|
||||
AZURE_BACKUP_RESOURCE_GROUP=Ark_Backups
|
||||
az group create -n $AZURE_BACKUP_RESOURCE_GROUP --location WestUS
|
||||
|
||||
# Create the storage account
|
||||
AZURE_STORAGE_ACCOUNT_ID="ark$(uuidgen | cut -d '-' -f5 | tr '[A-Z]' '[a-z]')"
|
||||
az storage account create \
|
||||
--name $AZURE_STORAGE_ACCOUNT_ID \
|
||||
--resource-group $AZURE_BACKUP_RESOURCE_GROUP \
|
||||
--sku Standard_GRS \
|
||||
--encryption-services blob \
|
||||
--https-only true \
|
||||
--kind BlobStorage \
|
||||
--access-tier Hot
|
||||
```
|
||||
|
||||
Create the blob container named `ark`. Feel free to use a different name, preferably unique to a single Kubernetes cluster. See the [FAQ][20] for more details.
|
||||
|
||||
```bash
|
||||
az storage container create -n ark --public-access off --account-name $AZURE_STORAGE_ACCOUNT_ID
|
||||
```
|
||||
|
||||
## Get resource group for persistent volume snapshots
|
||||
|
||||
1. Set the name of the Resource Group that contains your Kubernetes cluster's virtual machines/disks.
|
||||
|
||||
> **WARNING**: If you're using [AKS][22], `AZURE_RESOURCE_GROUP` must be set to the name of the auto-generated resource group that is created
|
||||
when you provision your cluster in Azure, since this is the resource group that contains your cluster's virtual machines/disks.
|
||||
|
||||
```bash
|
||||
AZURE_RESOURCE_GROUP=<NAME_OF_RESOURCE_GROUP>
|
||||
```
|
||||
|
||||
If you are unsure of the Resource Group name, run the following command to get a list that you can select from. Then set the `AZURE_RESOURCE_GROUP` environment variable to the appropriate value.
|
||||
|
||||
```bash
|
||||
az group list --query '[].{ ResourceGroup: name, Location:location }'
|
||||
```
|
||||
|
||||
Get your cluster's Resource Group name from the `ResourceGroup` value in the response, and use it to set `$AZURE_RESOURCE_GROUP`.
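The exact value depends on your environment; for an AKS cluster the auto-generated group name often looks something like the following (the name here is purely illustrative):

```bash
AZURE_RESOURCE_GROUP=MC_my-resource-group_my-aks-cluster_westus
```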
|
||||
|
||||
## Create service principal
|
||||
|
||||
To integrate Ark with Azure, you must create an Ark-specific [service principal][17].
|
||||
|
||||
1. Obtain your Azure Account Subscription ID and Tenant ID:
|
||||
|
||||
```bash
|
||||
AZURE_SUBSCRIPTION_ID=`az account list --query '[?isDefault].id' -o tsv`
|
||||
AZURE_TENANT_ID=`az account list --query '[?isDefault].tenantId' -o tsv`
|
||||
```
|
||||
|
||||
1. Create a service principal with `Contributor` role. This will have subscription-wide access, so protect this credential. You can specify a password or let the `az ad sp create-for-rbac` command create one for you.
|
||||
|
||||
> If you'll be using Ark to backup multiple clusters with multiple blob containers, it may be desirable to create a unique username per cluster rather than the default `heptio-ark`.
|
||||
|
||||
```bash
|
||||
# Create service principal and specify your own password
|
||||
AZURE_CLIENT_SECRET=super_secret_and_high_entropy_password_replace_me_with_your_own
|
||||
az ad sp create-for-rbac --name "heptio-ark" --role "Contributor" --password $AZURE_CLIENT_SECRET
|
||||
|
||||
# Or create service principal and let the CLI generate a password for you. Make sure to capture the password.
|
||||
AZURE_CLIENT_SECRET=`az ad sp create-for-rbac --name "heptio-ark" --role "Contributor" --query 'password' -o tsv`
|
||||
|
||||
# After creating the service principal, obtain the client id
|
||||
AZURE_CLIENT_ID=`az ad sp list --display-name "heptio-ark" --query '[0].appId' -o tsv`
|
||||
```
|
||||
|
||||
## Credentials and configuration
|
||||
|
||||
In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML file to specify the namespace. See [Run in custom namespace][0].
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
```
|
||||
|
||||
Now you need to create a Secret that contains all the environment variables you just set. The command looks like the following:
|
||||
|
||||
```bash
|
||||
kubectl create secret generic cloud-credentials \
|
||||
--namespace <ARK_NAMESPACE> \
|
||||
--from-literal AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID} \
|
||||
--from-literal AZURE_TENANT_ID=${AZURE_TENANT_ID} \
|
||||
--from-literal AZURE_CLIENT_ID=${AZURE_CLIENT_ID} \
|
||||
--from-literal AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET} \
|
||||
--from-literal AZURE_RESOURCE_GROUP=${AZURE_RESOURCE_GROUP}
|
||||
```
|
||||
|
||||
Now that you have your Azure credentials stored in a Secret, you need to replace some placeholder values in the template files. Specifically, you need to change the following:
|
||||
|
||||
* In file `config/azure/05-ark-backupstoragelocation.yaml`:
|
||||
|
||||
* Replace `<YOUR_BLOB_CONTAINER>`, `<YOUR_STORAGE_RESOURCE_GROUP>`, and `<YOUR_STORAGE_ACCOUNT>`. See the [BackupStorageLocation definition][21] for details.
|
||||
|
||||
* In file `config/azure/06-ark-volumesnapshotlocation.yaml`:
|
||||
|
||||
* Replace `<YOUR_TIMEOUT>`. See the [VolumeSnapshotLocation definition][8] for details.
|
||||
|
||||
* (Optional, use only if you need to specify multiple volume snapshot locations) In `config/azure/00-ark-deployment.yaml`:
|
||||
|
||||
* Uncomment the `--default-volume-snapshot-locations` and replace provider locations with the values for your environment.
|
||||
|
||||
## Start the server
|
||||
|
||||
In the root of your Ark directory, run:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/azure/
|
||||
```
|
||||
|
||||
[0]: namespace.md
|
||||
[8]: api-types/volumesnapshotlocation.md#azure
|
||||
[17]: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects
|
||||
[18]: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli
|
||||
[19]: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#storage
|
||||
[20]: faq.md
|
||||
[21]: api-types/backupstoragelocation.md#azure
|
||||
[22]: https://azure.microsoft.com/en-us/services/kubernetes-service/
|
||||
@@ -1,64 +1,75 @@
|
||||
# Build From Scratch
|
||||
# Build from source
|
||||
|
||||
While the [README][0] pulls from the Heptio image registry, you can also build your own Heptio Ark container with the following steps:
|
||||
* [Prerequisites][1]
|
||||
* [Download][2]
|
||||
* [Build][3]
|
||||
* [Test][12]
|
||||
* [Run][7]
|
||||
* [Vendoring dependencies][10]
|
||||
|
||||
* [0. Prerequisites][1]
|
||||
* [1. Download][2]
|
||||
* [2. Build][3]
|
||||
* [3. Test][12]
|
||||
* [4. Run][7]
|
||||
* [5. Vendoring dependencies][10]
|
||||
## Prerequisites
|
||||
|
||||
## 0. Prerequisites
|
||||
* Access to a Kubernetes cluster, version 1.7 or later. Version 1.7.5 or later is required to run `ark backup delete`.
|
||||
* A DNS server on the cluster
|
||||
* `kubectl` installed
|
||||
* [Go][5] installed (minimum version 1.8)
|
||||
|
||||
In addition to handling the prerequisites mentioned in the [Quickstart][4], you should have [Go][5] installed (minimum version 1.8).
|
||||
## Getting the source
|
||||
|
||||
## 1. Download
|
||||
|
||||
Install with go:
|
||||
```
|
||||
```bash
|
||||
mkdir $HOME/go
|
||||
export GOPATH=$HOME/go
|
||||
go get github.com/heptio/ark
|
||||
```
|
||||
The files are installed in `$GOPATH/src/github.com/heptio/ark`.
|
||||
|
||||
## 2. Build
|
||||
Where `go` is your [import path][4] for Go.
|
||||
|
||||
For Go development, it is recommended to add the Go import path (`$HOME/go` in this example) to your path.
|
||||
|
||||
|
||||
## Build
|
||||
|
||||
You can build your Ark image locally on the machine where you run your cluster, or you can push it to a private registry. This section covers both workflows.
|
||||
|
||||
Set the `$REGISTRY` environment variable (used in the `Makefile`) if you want to push the Heptio Ark images to your own registry. This allows any node in your cluster to pull your locally built image.
|
||||
Set the `$REGISTRY` environment variable (used in the `Makefile`) to push the Heptio Ark images to your own registry. This allows any node in your cluster to pull your locally built image.
|
||||
|
||||
`$PROJECT` and `$VERSION` environment variables are also specified in the `Makefile`, and can be similarly modified as desired.
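For instance, a minimal set of overrides might look like this (the registry, project name, and tag are illustrative placeholders, not defaults from the `Makefile`):

```bash
# Illustrative values; substitute your own registry, project name, and tag.
export REGISTRY=gcr.io/my-registry
export PROJECT=ark
export VERSION=dev
```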
|
||||
In the Ark root directory, to build your container with the tag `$REGISTRY/ark:$VERSION`, run:
|
||||
|
||||
Run the following in the Ark root directory to build your container with the tag `$REGISTRY/$PROJECT:$VERSION`:
|
||||
```
|
||||
make container
|
||||
```
|
||||
|
||||
To push your image to a registry, use `make push`.
|
||||
|
||||
### Updating generated files
|
||||
### Update generated files
|
||||
|
||||
The following files are automatically generated from the source code:
|
||||
|
||||
There are several files that are automatically generated based on the source code in the repository.
|
||||
These include:
|
||||
* The clientset
|
||||
* Listers
|
||||
* Shared informers
|
||||
* Documentation
|
||||
* Protobuf/gRPC types
|
||||
|
||||
Run `make update` to regenerate files if you make the following changes:
|
||||
|
||||
If you make any of the following changes, you will need to run `make update` to regenerate
|
||||
automatically generated files:
|
||||
* Add/edit/remove command line flags and/or their help text
|
||||
* Add/edit/remove commands or subcommands
|
||||
* Add new API types
|
||||
|
||||
Run [generate-proto.sh][13] to regenerate files if you make the following changes:
|
||||
|
||||
* Add/edit/remove protobuf message or service definitions. These changes require the [proto compiler][14].
|
||||
|
||||
### Cross compiling
|
||||
|
||||
By default, `make` will build an `ark` binary that runs on your host operating system and
architecture. If you want to build for another platform, you can do so with
`make build-<GOOS>-<GOARCH>` - for example, to build for the Mac, you would run `make build-darwin-amd64`.
All binaries are placed in `_output/bin/<GOOS>/<GOARCH>`, e.g. `_output/bin/darwin/amd64/ark`.
|
||||
By default, `make build` builds an `ark` binary for `linux-amd64`.
|
||||
To build for another platform, run `make build-<GOOS>-<GOARCH>`.
|
||||
For example, to build for the Mac, run `make build-darwin-amd64`.
|
||||
All binaries are placed in `_output/bin/<GOOS>/<GOARCH>`-- for example, `_output/bin/darwin/amd64/ark`.
|
||||
|
||||
Ark's `Makefile` has a convenience target, `all-build`, that builds the following platforms:
|
||||
|
||||
* linux-amd64
|
||||
* linux-arm
|
||||
* linux-arm64
|
||||
@@ -72,40 +83,143 @@ files (clientset, listers, shared informers, docs) are up to date.
|
||||
|
||||
## 4. Run
|
||||
|
||||
### Considerations
|
||||
### Prerequisites
|
||||
|
||||
When running Heptio Ark, you will need to account for the following (all of which are handled in the [`/examples`][6] manifests):
|
||||
|
||||
* Appropriate RBAC permissions in the cluster
|
||||
* *Read access* for all data from the source cluster and namespaces
|
||||
* *Write access* to the target cluster and namespaces
|
||||
* Read access for all data from the source cluster and namespaces
|
||||
* Write access to the target cluster and namespaces
|
||||
* Cloud provider credentials
|
||||
* *Read/write access* to volumes
|
||||
* *Read/write access* to object storage for backup data
|
||||
* A [Config object][8] definition for the Ark server
|
||||
* Read/write access to volumes
|
||||
* Read/write access to object storage for backup data
|
||||
* A [BackupStorageLocation][20] object definition for the Ark server
|
||||
* (Optional) A [VolumeSnapshotLocation][21] object definition for the Ark server, to take PV snapshots
|
||||
|
||||
See [Cloud Provider Specifics][9] for a more detailed guide.
|
||||
### Create a cluster
|
||||
|
||||
### Specifying your image
|
||||
To provision a cluster on AWS using Amazon’s official CloudFormation templates, here are two options:
|
||||
|
||||
Once your Ark deployment is up and running, **you need to replace the Heptio-provided Ark image with the specific one that you built.** You can do so with the following command:
|
||||
* EC2 [Quick Start for Kubernetes][17]
|
||||
|
||||
* eksctl - [a CLI for Amazon EKS][18]
|
||||
|
||||
### Option 1: Run your Ark server locally
|
||||
|
||||
Running the Ark server locally can speed up iterative development. This eliminates the need to rebuild the Ark server
|
||||
image and redeploy it to the cluster with each change.
|
||||
|
||||
#### 1. Set environment variables
|
||||
|
||||
Set the appropriate environment variables for your cloud provider (an illustrative Azure example follows the list below):
|
||||
|
||||
AWS: [AWS_SHARED_CREDENTIALS_FILE][15]
|
||||
|
||||
GCP: [GOOGLE_APPLICATION_CREDENTIALS][16]
|
||||
|
||||
Azure:
|
||||
|
||||
1. AZURE_CLIENT_ID
|
||||
|
||||
2. AZURE_CLIENT_SECRET
|
||||
|
||||
3. AZURE_SUBSCRIPTION_ID
|
||||
|
||||
4. AZURE_TENANT_ID
|
||||
|
||||
5. AZURE_STORAGE_ACCOUNT_ID
|
||||
|
||||
6. AZURE_STORAGE_KEY
|
||||
|
||||
7. AZURE_RESOURCE_GROUP
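For Azure, a minimal sketch of exporting these variables looks like the following; every value is a placeholder you must replace with your own:

```bash
# Placeholders only -- substitute the values for your own subscription and cluster.
export AZURE_CLIENT_ID=<your-client-id>
export AZURE_CLIENT_SECRET=<your-client-secret>
export AZURE_SUBSCRIPTION_ID=<your-subscription-id>
export AZURE_TENANT_ID=<your-tenant-id>
export AZURE_STORAGE_ACCOUNT_ID=<your-storage-account>
export AZURE_STORAGE_KEY=<your-storage-key>
export AZURE_RESOURCE_GROUP=<your-resource-group>
```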
|
||||
|
||||
#### 2. Create resources in a cluster
|
||||
|
||||
You may create resources on a cluster using our [example configurations][19].
|
||||
|
||||
##### Example
|
||||
|
||||
Here is how to set up Ark using an existing cluster in AWS. At the root of the Ark repo:
|
||||
|
||||
- Edit `examples/aws/05-ark-backupstoragelocation.yaml` to point to your AWS S3 bucket and region. Note: you can run `aws s3api list-buckets` to get the name of all your buckets.
|
||||
|
||||
- (Optional) Edit `examples/aws/06-ark-volumesnapshotlocation.yaml` to point to your AWS region.
|
||||
|
||||
Then run the commands below.
|
||||
|
||||
`00-prereqs.yaml` contains all our CustomResourceDefinitions (CRDs) that allow us to perform CRUD operations on backups, restores, schedules, etc. It also contains the `heptio-ark` namespace, the `ark` ServiceAccount, and a cluster role binding to grant the `ark` ServiceAccount the cluster-admin role:
|
||||
|
||||
```bash
|
||||
kubectl apply -f examples/common/00-prereqs.yaml
|
||||
```
|
||||
kubectl set image deployment/ark ark=$REGISTRY/$PROJECT:$VERSION
|
||||
|
||||
`10-deployment.yaml` is a sample Ark config resource for AWS:
|
||||
|
||||
```bash
|
||||
kubectl apply -f examples/aws/10-deployment.yaml
|
||||
```
|
||||
where `$REGISTRY`, `$PROJECT`, and `$VERSION` match what you used in the [build step][3].
|
||||
|
||||
And `05-ark-backupstoragelocation.yaml` specifies the location of your backup storage, together with the optional `06-ark-volumesnapshotlocation.yaml`:
|
||||
|
||||
```bash
|
||||
kubectl apply -f examples/aws/05-ark-backupstoragelocation.yaml
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```bash
|
||||
kubectl apply -f examples/aws/05-ark-backupstoragelocation.yaml examples/aws/06-ark-volumesnapshotlocation.yaml
|
||||
```
|
||||
|
||||
### 3. Start the Ark server
|
||||
|
||||
* Make sure `ark` is in your `PATH` or specify the full path.
|
||||
|
||||
* Set variables for Ark as needed. The variables below can be exported as environment variables or passed as CLI command flags (see the example after this list):
|
||||
* `--kubeconfig`: set the path to the kubeconfig file the Ark server uses to talk to the Kubernetes apiserver
|
||||
* `--namespace`: set the namespace where the Ark server should look for backups, schedules, and restores
|
||||
* `--log-level`: set the Ark server's log level
|
||||
* `--plugin-dir`: set the directory where the Ark server looks for plugins
|
||||
* `--metrics-address`: set the bind address and port where Prometheus metrics are exposed
|
||||
|
||||
* Start the server: `ark server`
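A minimal local invocation using the flags above might look like this (the kubeconfig path and values are illustrative):

```bash
ark server --kubeconfig $HOME/.kube/config --namespace heptio-ark --log-level debug
```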
|
||||
|
||||
### Option 2: Run your Ark server in a deployment
|
||||
|
||||
1. Install Ark using a deployment:
|
||||
|
||||
We have examples of deployments for different cloud providers in `examples/<cloud-provider>/10-deployment.yaml`.
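   For example, on AWS you might apply the sample deployment directly (this mirrors the `kubectl apply` commands shown earlier in this doc):

   ```bash
   kubectl apply -f examples/aws/10-deployment.yaml
   ```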
|
||||
|
||||
2. Replace the deployment's default Ark image with the image that you built. Run:
|
||||
|
||||
```
|
||||
kubectl --namespace=heptio-ark set image deployment/ark ark=$REGISTRY/ark:$VERSION
|
||||
```
|
||||
|
||||
where `$REGISTRY` and `$VERSION` are the values that you built Ark with.
|
||||
|
||||
## 5. Vendoring dependencies
|
||||
If you need to add or update the vendored dependencies, please see [Vendoring dependencies][11].
|
||||
|
||||
If you need to add or update the vendored dependencies, see [Vendoring dependencies][11].
|
||||
|
||||
[0]: ../README.md
|
||||
[1]: #0-prerequisites
|
||||
[2]: #1-download
|
||||
[3]: #2-build
|
||||
[4]: ../README.md#quickstart
|
||||
[1]: #prerequisites
|
||||
[2]: #download
|
||||
[3]: #build
|
||||
[4]: https://blog.golang.org/organizing-go-code
|
||||
[5]: https://golang.org/doc/install
|
||||
[6]: /examples
|
||||
[7]: #4-run
|
||||
[8]: reference.md#ark-config-definition
|
||||
[9]: cloud-provider-specifics.md
|
||||
[10]: #4-vendoring-dependencies
|
||||
[6]: https://github.com/heptio/ark/tree/master/examples
|
||||
[7]: #run
|
||||
[8]: config-definition.md
|
||||
[10]: #vendoring-dependencies
|
||||
[11]: vendoring-dependencies.md
|
||||
[12]: #3-test
|
||||
[12]: #test
|
||||
[13]: https://github.com/heptio/ark/blob/master/hack/generate-proto.sh
|
||||
[14]: https://grpc.io/docs/quickstart/go.html#install-protocol-buffers-v3
|
||||
[15]: https://docs.aws.amazon.com/cli/latest/topic/config-vars.html#the-shared-credentials-file
|
||||
[16]: https://cloud.google.com/docs/authentication/getting-started#setting_the_environment_variable
|
||||
[17]: https://aws.amazon.com/quickstart/architecture/heptio-kubernetes/
|
||||
[18]: https://eksctl.io/
|
||||
[19]: ../examples/README.md
|
||||
[20]: api-types/backupstoragelocation.md
|
||||
[21]: api-types/volumesnapshotlocation.md
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
# Command line reference
|
||||
|
||||
The Ark client provides a CLI that allows you to initiate ad-hoc backups, scheduled backups, or restores.
|
||||
|
||||
*The files in this directory enumerate each of the possible `ark` commands and their flags. Note that you can also find this info with the CLI itself, using the `--help` flag.*
|
||||
|
||||
## Running the client
|
||||
|
||||
While it is possible to build and run the `ark` executable yourself, it is recommended to use the containerized version. Use the alias described in the quickstart:
|
||||
|
||||
```
|
||||
alias ark='docker run --rm -u $(id -u) -v $(dirname $KUBECONFIG):/kubeconfig -e KUBECONFIG=/kubeconfig/$(basename $KUBECONFIG) gcr.io/heptio-images/ark:latest'
|
||||
```
|
||||
|
||||
Assuming that your `KUBECONFIG` variable is set, this alias takes care of specifying the appropriate Kubernetes cluster credentials for you.
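For example, with the alias defined you can list your backups just as you would with a locally built binary:

```bash
ark get backups
```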
|
||||
|
||||
## Kubernetes cluster credentials
|
||||
In general, Ark will search for your cluster credentials in the following order:
|
||||
* `--kubeconfig` command line flag
|
||||
* `$KUBECONFIG` environment variable
|
||||
* In-cluster credentials--this only works when you are running Ark in a pod
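For example, to point the client at an explicit kubeconfig rather than relying on `$KUBECONFIG` (the path here is illustrative):

```bash
ark backup get --kubeconfig /path/to/admin.kubeconfig
```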
|
||||
@@ -1,38 +0,0 @@
|
||||
## ark
|
||||
|
||||
Back up and restore Kubernetes cluster resources.
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Heptio Ark is a tool for managing disaster recovery, specifically for Kubernetes
|
||||
cluster resources. It provides a simple, configurable, and operationally robust
|
||||
way to back up your application state and associated data.
|
||||
|
||||
If you're familiar with kubectl, Ark supports a similar model, allowing you to
|
||||
execute commands such as 'ark get backup' and 'ark create schedule'. The same
|
||||
operations can also be performed as 'ark backup get' and 'ark schedule create'.
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
-h, --help help for ark
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark backup](ark_backup.md) - Work with backups
|
||||
* [ark create](ark_create.md) - Create ark resources
|
||||
* [ark get](ark_get.md) - Get ark resources
|
||||
* [ark restore](ark_restore.md) - Work with restores
|
||||
* [ark schedule](ark_schedule.md) - Work with schedules
|
||||
* [ark server](ark_server.md) - Run the ark server
|
||||
* [ark version](ark_version.md) - Print the ark version and associated image
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
## ark backup
|
||||
|
||||
Work with backups
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Work with backups
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for backup
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
|
||||
* [ark backup create](ark_backup_create.md) - Create a backup
|
||||
* [ark backup download](ark_backup_download.md) - Download a backup
|
||||
* [ark backup get](ark_backup_get.md) - Get backups
|
||||
* [ark backup logs](ark_backup_logs.md) - Get backup logs
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
## ark backup create
|
||||
|
||||
Create a backup
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Create a backup
|
||||
|
||||
```
|
||||
ark backup create NAME [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
--exclude-namespaces stringArray namespaces to exclude from the backup
|
||||
--exclude-resources stringArray resources to exclude from the backup, formatted as resource.group, such as storageclasses.storage.k8s.io
|
||||
-h, --help help for create
|
||||
--include-cluster-resources optionalBool[=true] include cluster-scoped resources in the backup
|
||||
--include-namespaces stringArray namespaces to include in the backup (use '*' for all namespaces) (default *)
|
||||
--include-resources stringArray resources to include in the backup, formatted as resource.group, such as storageclasses.storage.k8s.io (use '*' for all resources)
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
--labels mapStringString labels to apply to the backup
|
||||
-o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'.
|
||||
-l, --selector labelSelector only back up resources matching this label selector (default <none>)
|
||||
--show-labels show labels in the last column
|
||||
--snapshot-volumes optionalBool[=true] take snapshots of PersistentVolumes as part of the backup
|
||||
--ttl duration how long before the backup can be garbage collected (default 24h0m0s)
|
||||
```
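
For instance, combining some of the flags above (the backup and namespace names are illustrative):

```bash
# Back up a single namespace and snapshot its persistent volumes
ark backup create nginx-backup --include-namespaces nginx-example --snapshot-volumes
```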
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark backup](ark_backup.md) - Work with backups
|
||||
|
||||
@@ -1,38 +0,0 @@
|
||||
## ark backup download
|
||||
|
||||
Download a backup
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Download a backup
|
||||
|
||||
```
|
||||
ark backup download NAME [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
--force forces the download and will overwrite file if it exists already
|
||||
-h, --help help for download
|
||||
-o, --output string path to output file. Defaults to <NAME>-data.tar.gz in the current directory
|
||||
--timeout duration maximum time to wait to process download request (default 1m0s)
|
||||
```
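
For example (the backup name and output path are illustrative):

```bash
ark backup download nginx-backup -o /tmp/nginx-backup-data.tar.gz
```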
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark backup](ark_backup.md) - Work with backups
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
## ark backup get
|
||||
|
||||
Get backups
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Get backups
|
||||
|
||||
```
|
||||
ark backup get [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for get
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
-o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'. (default "table")
|
||||
-l, --selector string only show items matching this label selector
|
||||
--show-labels show labels in the last column
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark backup](ark_backup.md) - Work with backups
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
## ark backup logs
|
||||
|
||||
Get backup logs
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Get backup logs
|
||||
|
||||
```
|
||||
ark backup logs BACKUP [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for logs
|
||||
--timeout duration how long to wait to receive logs (default 1m0s)
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark backup](ark_backup.md) - Work with backups
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
## ark create
|
||||
|
||||
Create ark resources
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Create ark resources
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for create
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
|
||||
* [ark create backup](ark_create_backup.md) - Create a backup
|
||||
* [ark create restore](ark_create_restore.md) - Create a restore
|
||||
* [ark create schedule](ark_create_schedule.md) - Create a schedule
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
## ark create backup
|
||||
|
||||
Create a backup
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Create a backup
|
||||
|
||||
```
|
||||
ark create backup NAME [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
--exclude-namespaces stringArray namespaces to exclude from the backup
|
||||
--exclude-resources stringArray resources to exclude from the backup, formatted as resource.group, such as storageclasses.storage.k8s.io
|
||||
-h, --help help for backup
|
||||
--include-cluster-resources optionalBool[=true] include cluster-scoped resources in the backup
|
||||
--include-namespaces stringArray namespaces to include in the backup (use '*' for all namespaces) (default *)
|
||||
--include-resources stringArray resources to include in the backup, formatted as resource.group, such as storageclasses.storage.k8s.io (use '*' for all resources)
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
--labels mapStringString labels to apply to the backup
|
||||
-o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'.
|
||||
-l, --selector labelSelector only back up resources matching this label selector (default <none>)
|
||||
--show-labels show labels in the last column
|
||||
--snapshot-volumes optionalBool[=true] take snapshots of PersistentVolumes as part of the backup
|
||||
--ttl duration how long before the backup can be garbage collected (default 24h0m0s)
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark create](ark_create.md) - Create ark resources
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
## ark create restore
|
||||
|
||||
Create a restore
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Create a restore
|
||||
|
||||
```
|
||||
ark create restore BACKUP [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
--exclude-namespaces stringArray namespaces to exclude from the restore
|
||||
--exclude-resources stringArray resources to exclude from the restore, formatted as resource.group, such as storageclasses.storage.k8s.io
|
||||
-h, --help help for restore
|
||||
--include-cluster-resources optionalBool[=true] include cluster-scoped resources in the restore
|
||||
--include-namespaces stringArray namespaces to include in the restore (use '*' for all namespaces) (default *)
|
||||
--include-resources stringArray resources to include in the restore, formatted as resource.group, such as storageclasses.storage.k8s.io (use '*' for all resources)
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
--labels mapStringString labels to apply to the restore
|
||||
--namespace-mappings mapStringString namespace mappings from name in the backup to desired restored name in the form src1:dst1,src2:dst2,...
|
||||
-o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'.
|
||||
--restore-volumes optionalBool[=true] whether to restore volumes from snapshots
|
||||
-l, --selector labelSelector only restore resources matching this label selector (default <none>)
|
||||
--show-labels show labels in the last column
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark create](ark_create.md) - Create ark resources
|
||||
|
||||
@@ -1,48 +0,0 @@
|
||||
## ark create schedule
|
||||
|
||||
Create a schedule
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Create a schedule
|
||||
|
||||
```
|
||||
ark create schedule NAME [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
--exclude-namespaces stringArray namespaces to exclude from the backup
|
||||
--exclude-resources stringArray resources to exclude from the backup, formatted as resource.group, such as storageclasses.storage.k8s.io
|
||||
-h, --help help for schedule
|
||||
--include-cluster-resources optionalBool[=true] include cluster-scoped resources in the backup
|
||||
--include-namespaces stringArray namespaces to include in the backup (use '*' for all namespaces) (default *)
|
||||
--include-resources stringArray resources to include in the backup, formatted as resource.group, such as storageclasses.storage.k8s.io (use '*' for all resources)
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
--labels mapStringString labels to apply to the backup
|
||||
-o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'.
|
||||
--schedule string a cron expression specifying a recurring schedule for this backup to run
|
||||
-l, --selector labelSelector only back up resources matching this label selector (default <none>)
|
||||
--show-labels show labels in the last column
|
||||
--snapshot-volumes optionalBool[=true] take snapshots of PersistentVolumes as part of the backup
|
||||
--ttl duration how long before the backup can be garbage collected (default 24h0m0s)
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark create](ark_create.md) - Create ark resources
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
## ark get
|
||||
|
||||
Get ark resources
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Get ark resources
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for get
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
|
||||
* [ark get backups](ark_get_backups.md) - Get backups
|
||||
* [ark get restores](ark_get_restores.md) - Get restores
|
||||
* [ark get schedules](ark_get_schedules.md) - Get schedules
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
## ark get backups
|
||||
|
||||
Get backups
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Get backups
|
||||
|
||||
```
|
||||
ark get backups [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for backups
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
-o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'. (default "table")
|
||||
-l, --selector string only show items matching this label selector
|
||||
--show-labels show labels in the last column
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark get](ark_get.md) - Get ark resources
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
## ark get restores
|
||||
|
||||
Get restores
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Get restores
|
||||
|
||||
```
|
||||
ark get restores [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for restores
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
-o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'. (default "table")
|
||||
-l, --selector string only show items matching this label selector
|
||||
--show-labels show labels in the last column
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark get](ark_get.md) - Get ark resources
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
## ark get schedules
|
||||
|
||||
Get schedules
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Get schedules
|
||||
|
||||
```
|
||||
ark get schedules [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for schedules
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
-o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'. (default "table")
|
||||
-l, --selector string only show items matching this label selector
|
||||
--show-labels show labels in the last column
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark get](ark_get.md) - Get ark resources
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
## ark restore
|
||||
|
||||
Work with restores
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Work with restores
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for restore
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
|
||||
* [ark restore create](ark_restore_create.md) - Create a restore
|
||||
* [ark restore delete](ark_restore_delete.md) - Delete a restore
|
||||
* [ark restore get](ark_restore_get.md) - Get restores
|
||||
* [ark restore logs](ark_restore_logs.md) - Get restore logs
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
## ark restore create
|
||||
|
||||
Create a restore
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Create a restore
|
||||
|
||||
```
|
||||
ark restore create BACKUP [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
--exclude-namespaces stringArray namespaces to exclude from the restore
|
||||
--exclude-resources stringArray resources to exclude from the restore, formatted as resource.group, such as storageclasses.storage.k8s.io
|
||||
-h, --help help for create
|
||||
--include-cluster-resources optionalBool[=true] include cluster-scoped resources in the restore
|
||||
--include-namespaces stringArray namespaces to include in the restore (use '*' for all namespaces) (default *)
|
||||
--include-resources stringArray resources to include in the restore, formatted as resource.group, such as storageclasses.storage.k8s.io (use '*' for all resources)
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
--labels mapStringString labels to apply to the restore
|
||||
--namespace-mappings mapStringString namespace mappings from name in the backup to desired restored name in the form src1:dst1,src2:dst2,...
|
||||
-o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'.
|
||||
--restore-volumes optionalBool[=true] whether to restore volumes from snapshots
|
||||
-l, --selector labelSelector only restore resources matching this label selector (default <none>)
|
||||
--show-labels show labels in the last column
|
||||
```
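
For instance, restoring a backup into a different namespace using the flags above (all names are illustrative):

```bash
ark restore create nginx-backup --namespace-mappings nginx-example:nginx-restored
```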
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark restore](ark_restore.md) - Work with restores
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
## ark restore delete
|
||||
|
||||
Delete a restore
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Delete a restore
|
||||
|
||||
```
|
||||
ark restore delete NAME [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for delete
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark restore](ark_restore.md) - Work with restores
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
## ark restore get
|
||||
|
||||
Get restores
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Get restores
|
||||
|
||||
```
|
||||
ark restore get [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for get
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
-o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'. (default "table")
|
||||
-l, --selector string only show items matching this label selector
|
||||
--show-labels show labels in the last column
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark restore](ark_restore.md) - Work with restores
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
## ark restore logs
|
||||
|
||||
Get restore logs
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Get restore logs
|
||||
|
||||
```
|
||||
ark restore logs RESTORE [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for logs
|
||||
--timeout duration how long to wait to receive logs (default 1m0s)
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark restore](ark_restore.md) - Work with restores
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
## ark schedule
|
||||
|
||||
Work with schedules
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Work with schedules
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for schedule
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
|
||||
* [ark schedule create](ark_schedule_create.md) - Create a schedule
|
||||
* [ark schedule delete](ark_schedule_delete.md) - Delete a schedule
|
||||
* [ark schedule get](ark_schedule_get.md) - Get schedules
|
||||
|
||||
@@ -1,48 +0,0 @@
|
||||
## ark schedule create
|
||||
|
||||
Create a schedule
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Create a schedule
|
||||
|
||||
```
|
||||
ark schedule create NAME [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
--exclude-namespaces stringArray namespaces to exclude from the backup
|
||||
--exclude-resources stringArray resources to exclude from the backup, formatted as resource.group, such as storageclasses.storage.k8s.io
|
||||
-h, --help help for create
|
||||
--include-cluster-resources optionalBool[=true] include cluster-scoped resources in the backup
|
||||
--include-namespaces stringArray namespaces to include in the backup (use '*' for all namespaces) (default *)
|
||||
--include-resources stringArray resources to include in the backup, formatted as resource.group, such as storageclasses.storage.k8s.io (use '*' for all resources)
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
--labels mapStringString labels to apply to the backup
|
||||
-o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'.
|
||||
--schedule string a cron expression specifying a recurring schedule for this backup to run
|
||||
-l, --selector labelSelector only back up resources matching this label selector (default <none>)
|
||||
--show-labels show labels in the last column
|
||||
--snapshot-volumes optionalBool[=true] take snapshots of PersistentVolumes as part of the backup
|
||||
--ttl duration how long before the backup can be garbage collected (default 24h0m0s)
|
||||
```
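
For instance, a nightly schedule built from the flags above (the schedule name, cron expression, and namespace are illustrative):

```bash
ark schedule create nightly-default --schedule "0 1 * * *" --include-namespaces default
```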
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark schedule](ark_schedule.md) - Work with schedules
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
## ark schedule delete
|
||||
|
||||
Delete a schedule
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Delete a schedule
|
||||
|
||||
```
|
||||
ark schedule delete NAME [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for delete
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark schedule](ark_schedule.md) - Work with schedules
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
## ark schedule get
|
||||
|
||||
Get schedules
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Get schedules
|
||||
|
||||
```
|
||||
ark schedule get [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for get
|
||||
--label-columns stringArray a comma-separated list of labels to be displayed as columns
|
||||
-o, --output string Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'. (default "table")
|
||||
-l, --selector string only show items matching this label selector
|
||||
--show-labels show labels in the last column
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark schedule](ark_schedule.md) - Work with schedules
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
## ark server
|
||||
|
||||
Run the ark server
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Run the ark server
|
||||
|
||||
```
|
||||
ark server [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for server
|
||||
--log-level the level at which to log. Valid values are debug, info, warning, error, fatal, panic. (default info)
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
## ark version
|
||||
|
||||
Print the ark version and associated image
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Print the ark version and associated image
|
||||
|
||||
```
|
||||
ark version [flags]
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for version
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--kubeconfig string Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
|
||||
|
||||
@@ -1,362 +0,0 @@
|
||||
# Cloud Provider Specifics
|
||||
|
||||
> NOTE: Documentation may change between releases. See the [Changelog][20] for links to previous versions of this repository and its docs.
|
||||
>
|
||||
> To ensure that you are working off a specific release, `git checkout <VERSION_TAG>` where `<VERSION_TAG>` is the appropriate tag for the Ark version you wish to use (e.g. "v0.3.3"). You should `git checkout master` only if you're planning on [building the Ark image from scratch][21].
|
||||
|
||||
While the [Quickstart][0] uses a local storage service to quickly set up Heptio Ark as a demonstration, this document details additional configurations that are required when integrating with the cloud providers below:
|
||||
|
||||
* [Setup][12]
|
||||
* [AWS][1]
|
||||
* [GCP][2]
|
||||
* [Azure][3]
|
||||
* [Run][13]
|
||||
* [Ark server][9]
|
||||
* [Basic example (no PVs)][10]
|
||||
* [Snapshot example (with PVs)][11]
|
||||
|
||||
|
||||
## Setup
|
||||
### AWS
|
||||
|
||||
#### IAM user creation
|
||||
|
||||
To integrate Heptio Ark with AWS, you should follow the instructions below to create an Ark-specific [IAM user][14].
|
||||
|
||||
1. If you do not have the AWS CLI locally installed, follow the [user guide][5] to set it up.
|
||||
|
||||
2. Create an IAM user:
|
||||
|
||||
```
|
||||
aws iam create-user --user-name heptio-ark
|
||||
```
|
||||
|
||||
3. Attach a policy to give `heptio-ark` the necessary permissions:
|
||||
|
||||
```
|
||||
aws iam attach-user-policy \
|
||||
--policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess \
|
||||
--user-name heptio-ark
|
||||
aws iam attach-user-policy \
|
||||
--policy-arn arn:aws:iam::aws:policy/AmazonEC2FullAccess \
|
||||
--user-name heptio-ark
|
||||
```
|
||||
|
||||
4. Create an access key for the user:
|
||||
|
||||
```
|
||||
aws iam create-access-key --user-name heptio-ark
|
||||
```
|
||||
|
||||
The result should look like:
|
||||
|
||||
```
|
||||
{
|
||||
"AccessKey": {
|
||||
"UserName": "heptio-ark",
|
||||
"Status": "Active",
|
||||
"CreateDate": "2017-07-31T22:24:41.576Z",
|
||||
"SecretAccessKey": <AWS_SECRET_ACCESS_KEY>,
|
||||
"AccessKeyId": <AWS_ACCESS_KEY_ID>
|
||||
}
|
||||
}
|
||||
```
|
||||
5. Using the output from the previous command, create an Ark-specific credentials file (`credentials-ark`) in your local directory that looks like the following:
|
||||
|
||||
```
|
||||
[default]
|
||||
aws_access_key_id=<AWS_ACCESS_KEY_ID>
|
||||
aws_secret_access_key=<AWS_SECRET_ACCESS_KEY>
|
||||
```
|
||||
|
||||
|
||||
#### Credentials and configuration
|
||||
|
||||
In the Ark root directory, run the following to first set up namespaces, RBAC, and other scaffolding:
|
||||
```
|
||||
kubectl apply -f examples/common/00-prereqs.yaml
|
||||
```
|
||||
|
||||
Create a Secret, running this command in the local directory of the credentials file you just created:
|
||||
|
||||
```
|
||||
kubectl create secret generic cloud-credentials \
|
||||
--namespace heptio-ark \
|
||||
--from-file cloud=credentials-ark
|
||||
```
|
||||
|
||||
Now that you have your IAM user credentials stored in a Secret, you need to replace some placeholder values in the template files. Specifically, you need to change the following:
|
||||
|
||||
* In file `examples/aws/00-ark-config.yaml`:
|
||||
|
||||
* Replace `<YOUR_BUCKET>` and `<YOUR_REGION>`. See the [Config definition][6] for details.
|
||||
|
||||
|
||||
* In file `examples/common/10-deployment.yaml`:
|
||||
|
||||
* Make sure that `spec.template.spec.containers[*].env.name` is "AWS_SHARED_CREDENTIALS_FILE".
|
||||
|
||||
|
||||
* (Optional) If you are running the Nginx example, in file `examples/nginx-app/with-pv.yaml`:
|
||||
|
||||
* Replace `<YOUR_STORAGE_CLASS_NAME>` with `gp2`. This is AWS's default `StorageClass` name.
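One possible way to fill in the `00-ark-config.yaml` placeholders described above (a sketch only; the bucket name and region are illustrative, and the `-i` syntax assumes GNU sed):

```
sed -i \
  -e 's|<YOUR_BUCKET>|my-ark-backups|' \
  -e 's|<YOUR_REGION>|us-west-2|' \
  examples/aws/00-ark-config.yaml
```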
|
||||
|
||||
|
||||
### GCP
|
||||
|
||||
#### Service account creation
|
||||
|
||||
To integrate Heptio Ark with GCP, you should follow the instructions below to create an Ark-specific [Service Account][15].
|
||||
|
||||
1. If you do not have the gcloud CLI locally installed, follow the [user guide][16] to set it up.
|
||||
|
||||
2. View your current config settings:
|
||||
|
||||
```
|
||||
gcloud config list
|
||||
```
|
||||
|
||||
Store the `project` value from the results in the environment variable `$PROJECT_ID`.
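One way to capture it directly (a sketch; assumes your `gcloud` version supports `config get-value`):

```
PROJECT_ID=$(gcloud config get-value project)
```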
|
||||
|
||||
3. Create a service account:
|
||||
|
||||
```
|
||||
gcloud iam service-accounts create heptio-ark \
|
||||
--display-name "Heptio Ark service account"
|
||||
```
|
||||
Then list all accounts and find the `heptio-ark` account you just created:
|
||||
```
|
||||
gcloud iam service-accounts list
|
||||
```
|
||||
Set the `$SERVICE_ACCOUNT_EMAIL` variable to match its `email` value.
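One way to capture it (a sketch that filters on the display name used above):

```
SERVICE_ACCOUNT_EMAIL=$(gcloud iam service-accounts list \
  --filter="displayName:Heptio Ark service account" \
  --format 'value(email)')
```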
|
||||
|
||||
4. Attach policies to give `heptio-ark` the necessary permissions to function (replacing placeholders appropriately):
|
||||
|
||||
```
|
||||
gcloud projects add-iam-policy-binding $PROJECT_ID \
|
||||
--member serviceAccount:$SERVICE_ACCOUNT_EMAIL \
|
||||
--role roles/compute.storageAdmin
|
||||
gcloud projects add-iam-policy-binding $PROJECT_ID \
|
||||
--member serviceAccount:$SERVICE_ACCOUNT_EMAIL \
|
||||
--role roles/storage.admin
|
||||
```
|
||||
|
||||
5. Create a service account key, specifying an output file (`credentials-ark`) in your local directory:
|
||||
|
||||
```
|
||||
gcloud iam service-accounts keys create credentials-ark \
|
||||
--iam-account $SERVICE_ACCOUNT_EMAIL
|
||||
```
|
||||
|
||||
#### Credentials and configuration
|
||||
|
||||
In the Ark root directory, run the following to first set up namespaces, RBAC, and other scaffolding:
|
||||
```
|
||||
kubectl apply -f examples/common/00-prereqs.yaml
|
||||
```
|
||||
|
||||
Create a Secret, running this command in the local directory of the credentials file you just created:
|
||||
|
||||
```
|
||||
kubectl create secret generic cloud-credentials \
|
||||
--namespace heptio-ark \
|
||||
--from-file cloud=credentials-ark
|
||||
```
|
||||
|
||||
Now that you have your Google Cloud credentials stored in a Secret, you need to replace some placeholder values in the template files. Specifically, you need to change the following:
|
||||
|
||||
* In file `examples/gcp/00-ark-config.yaml`:
|
||||
|
||||
* Replace `<YOUR_BUCKET>` and `<YOUR_PROJECT>`. See the [Config definition][7] for details.
|
||||
|
||||
|
||||
* In file `examples/common/10-deployment.yaml`:
|
||||
|
||||
* Change `spec.template.spec.containers[*].env.name` to "GOOGLE_APPLICATION_CREDENTIALS".
|
||||
|
||||
|
||||
* (Optional) If you are running the Nginx example, in file `examples/nginx-app/with-pv.yaml`:
|
||||
|
||||
* Replace `<YOUR_STORAGE_CLASS_NAME>` with `standard`. This is GCP's default `StorageClass` name.
|
||||
|
||||
### Azure
|
||||
|
||||
#### Service principal creation
|
||||
To integrate Heptio Ark with Azure, you should follow the instructions below to create an Ark-specific [service principal][17].
|
||||
|
||||
1. If you do not have the `az` Azure CLI 2.0 locally installed, follow the [user guide][18] to set it up. Once done, run:
|
||||
|
||||
```
|
||||
az login
|
||||
```
|
||||
|
||||
2. There are seven environment variables that need to be set for Heptio Ark to work properly. The following steps detail how to acquire these, in the process of setting up the necessary RBAC.
|
||||
|
||||
3. List your account:
|
||||
|
||||
```
|
||||
az account list
|
||||
```
|
||||
Save the relevant response values into environment variables: `id` corresponds to `$AZURE_SUBSCRIPTION_ID` and `tenantId` corresponds to `$AZURE_TENANT_ID`.
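One way to capture both values for the currently selected subscription (a sketch; assumes `az account show` points at the subscription you want to use):

```
AZURE_SUBSCRIPTION_ID=$(az account show --query id -o tsv)
AZURE_TENANT_ID=$(az account show --query tenantId -o tsv)
```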
|
||||
|
||||
4. Assuming that you already have a running Kubernetes cluster on Azure, you should have a corresponding resource group as well. List your current groups to find it:
|
||||
|
||||
```
|
||||
az group list
|
||||
```
|
||||
Get your cluster's group `name` from the response, and use it to set `$AZURE_RESOURCE_GROUP`. (Also note the `location`--this is later used in the Azure-specific portion of the Ark Config).
|
||||
|
||||
5. Create a service principal with the "Contributor" role:
|
||||
|
||||
```
|
||||
az ad sp create-for-rbac --role="Contributor" --name="heptio-ark"
|
||||
```
|
||||
From the response, save `appId` into `$AZURE_CLIENT_ID` and `password` into `$AZURE_CLIENT_SECRET`.
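If you prefer to script this step, a possible alternative to the plain invocation above (a sketch; assumes the `jq` JSON processor is installed):

```
# Capture the JSON response so both values can be extracted without retyping them.
ARK_SP_JSON=$(az ad sp create-for-rbac --role="Contributor" --name="heptio-ark")
AZURE_CLIENT_ID=$(echo "$ARK_SP_JSON" | jq -r .appId)
AZURE_CLIENT_SECRET=$(echo "$ARK_SP_JSON" | jq -r .password)
```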
|
||||
|
||||
6. Log in to the `heptio-ark` service principal account:
|
||||
|
||||
```
|
||||
az login --service-principal \
|
||||
--username http://heptio-ark \
|
||||
--password $AZURE_CLIENT_SECRET \
|
||||
--tenant $AZURE_TENANT_ID
|
||||
```
|
||||
|
||||
7. Specify a *globally-unique* storage account id and save it in `$AZURE_STORAGE_ACCOUNT_ID`. Then create the storage account, specifying the optional `--location` flag if you do not have defaults from `az configure`:
|
||||
|
||||
```
|
||||
az storage account create \
|
||||
--name $AZURE_STORAGE_ACCOUNT_ID \
|
||||
--resource-group $AZURE_RESOURCE_GROUP \
|
||||
--sku Standard_GRS
|
||||
```
|
||||
You will encounter an error message if the storage account ID is not unique; change it accordingly.
|
||||
|
||||
8. Get the keys for your storage account:
|
||||
|
||||
```
|
||||
az storage account keys list \
|
||||
--account-name $AZURE_STORAGE_ACCOUNT_ID \
|
||||
--resource-group $AZURE_RESOURCE_GROUP
|
||||
```
|
||||
Set `$AZURE_STORAGE_KEY` to any one of the `value`s returned.
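For example, to grab the first key (a sketch; the `--query` path assumes the JSON layout returned by current `az` versions):

```
AZURE_STORAGE_KEY=$(az storage account keys list \
  --account-name $AZURE_STORAGE_ACCOUNT_ID \
  --resource-group $AZURE_RESOURCE_GROUP \
  --query '[0].value' -o tsv)
```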
|
||||
|
||||
#### Credentials and configuration
|
||||
|
||||
In the Ark root directory, run the following to first set up namespaces, RBAC, and other scaffolding:
|
||||
```
|
||||
kubectl apply -f examples/common/00-prereqs.yaml
|
||||
```
|
||||
|
||||
Now you need to create a Secret that contains all seven of the environment variables you just set. The command looks like the following:
|
||||
```
|
||||
kubectl create secret generic cloud-credentials \
|
||||
--namespace heptio-ark \
|
||||
--from-literal AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID} \
|
||||
--from-literal AZURE_TENANT_ID=${AZURE_TENANT_ID} \
|
||||
--from-literal AZURE_RESOURCE_GROUP=${AZURE_RESOURCE_GROUP} \
|
||||
--from-literal AZURE_CLIENT_ID=${AZURE_CLIENT_ID} \
|
||||
--from-literal AZURE_CLIENT_SECRET=${AZURE_CLIENT_SECRET} \
|
||||
--from-literal AZURE_STORAGE_ACCOUNT_ID=${AZURE_STORAGE_ACCOUNT_ID} \
|
||||
--from-literal AZURE_STORAGE_KEY=${AZURE_STORAGE_KEY}
|
||||
```
|
||||
|
||||
Now that you have your Azure credentials stored in a Secret, you need to replace some placeholder values in the template files. Specifically, you need to change the following:
|
||||
|
||||
* In file `examples/azure/10-ark-config.yaml`:
|
||||
|
||||
* Replace `<YOUR_BUCKET>`, `<YOUR_LOCATION>`, and `<YOUR_TIMEOUT>`. See the [Config definition][8] for details.
|
||||
|
||||
|
||||
## Run
|
||||
|
||||
### Ark server
|
||||
|
||||
Make sure that you have run `kubectl apply -f examples/common/00-prereqs.yaml` first (this command is incorporated in the previous setup instructions because it creates the necessary namespaces).
|
||||
|
||||
* **AWS and GCP**
|
||||
|
||||
Start the Ark server itself, using the Config from the appropriate cloud-provider-specific directory:
|
||||
```
|
||||
kubectl apply -f examples/common/10-deployment.yaml
|
||||
kubectl apply -f examples/<CLOUD-PROVIDER>/
|
||||
```
|
||||
* **Azure**
|
||||
|
||||
Because Azure loads its credentials differently (from environment variables rather than a file), you need to instead run:
|
||||
```
|
||||
kubectl apply -f examples/azure/
|
||||
```
|
||||
|
||||
### Basic example (No PVs)
|
||||
|
||||
Start the sample nginx app:
|
||||
```
|
||||
kubectl apply -f examples/nginx-app/base.yaml
|
||||
```
|
||||
Now create a backup:
|
||||
```
|
||||
ark backup create nginx-backup --selector app=nginx
|
||||
```
|
||||
Simulate a disaster:
|
||||
```
|
||||
kubectl delete namespaces nginx-example
|
||||
```
|
||||
Now restore your lost resources:
|
||||
```
|
||||
ark restore create nginx-backup
|
||||
```
|
||||
|
||||
### Snapshot example (With PVs)
|
||||
|
||||
> NOTE: For Azure, your Kubernetes cluster needs to be version 1.7.2+ in order to support PV snapshotting of its managed disks.
|
||||
|
||||
Start the sample nginx app:
|
||||
```
|
||||
kubectl apply -f examples/nginx-app/with-pv.yaml
|
||||
```
|
||||
|
||||
Because Kubernetes does not automatically transfer labels from PVCs to dynamically generated PVs, you need to do so manually:
|
||||
```
|
||||
nginx_pv_name=$(kubectl get pv -o jsonpath='{.items[?(@.spec.claimRef.name=="nginx-logs")].metadata.name}')
|
||||
kubectl label pv $nginx_pv_name app=nginx
|
||||
```
|
||||
Now create a backup with PV snapshotting:
|
||||
```
|
||||
ark backup create nginx-backup --selector app=nginx
|
||||
```
|
||||
Simulate a disaster:
|
||||
```
|
||||
kubectl delete namespaces nginx-example
|
||||
kubectl delete pv $nginx_pv_name
|
||||
```
|
||||
Because the default [reclaim policy][19] for dynamically-provisioned PVs is "Delete", the above commands should trigger your cloud provider to delete the disk backing the PV. The deletion process is asynchronous so this may take some time. **Before continuing to the next step, check your cloud provider (via dashboard or CLI) to confirm that the disk no longer exists.**
|
||||
|
||||
Now restore your lost resources:
|
||||
```
|
||||
ark restore create nginx-backup
|
||||
```
|
||||
|
||||
[0]: /README.md#quickstart
|
||||
[1]: #aws
|
||||
[2]: #gcp
|
||||
[3]: #azure
|
||||
[4]: /examples/aws
|
||||
[5]: http://docs.aws.amazon.com/cli/latest/userguide/installing.html
|
||||
[6]: config-definition.md#aws
|
||||
[7]: config-definition.md#gcp
|
||||
[8]: config-definition.md#azure
|
||||
[9]: #ark-server
|
||||
[10]: #basic-example-no-pvs
|
||||
[11]: #snapshot-example-with-pvs
|
||||
[12]: #setup
|
||||
[13]: #run
|
||||
[14]: http://docs.aws.amazon.com/IAM/latest/UserGuide/introduction.html
|
||||
[15]: https://cloud.google.com/compute/docs/access/service-accounts
|
||||
[16]: https://cloud.google.com/compute/docs/gcloud-compute
|
||||
[17]: https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-application-objects
|
||||
[18]: https://docs.microsoft.com/en-us/azure/storage/storage-azure-cli
|
||||
[19]: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming
|
||||
[20]: /CHANGELOG.md
|
||||
[21]: /docs/build-from-scratch.md
|
||||
|
||||
@@ -1,78 +0,0 @@
|
||||
# Concepts
|
||||
|
||||
* [Overview][0]
|
||||
* [Operation types][1]
|
||||
* [1. Backups][2]
|
||||
* [2. Schedules][3]
|
||||
* [3. Restores][4]
|
||||
* [API types][9]
|
||||
* [Expired backup deletion][5]
|
||||
* [Cloud storage sync][6]
|
||||
|
||||
## Overview
|
||||
|
||||
Heptio Ark provides customizable degrees of recovery for all Kubernetes API objects (Pods, Deployments, Jobs, Custom Resource Definitions, etc.), as well as for persistent volumes. This recovery can be cluster-wide, or fine-tuned according to object type, namespace, or labels.
|
||||
|
||||
Ark is ideal for the disaster recovery use case, as well as for snapshotting your application state, prior to performing system operations on your cluster (e.g. upgrades).
|
||||
|
||||
## Operation types
|
||||
|
||||
This section gives a quick overview of the Ark operation types.
|
||||
|
||||
### 1. Backups
|
||||
The *backup* operation (1) uploads a tarball of copied Kubernetes resources into cloud object storage and (2) uses the cloud provider API to make disk snapshots of persistent volumes, if specified. [Annotations][8] are cleared for PVs but kept for all other object types.
|
||||
|
||||
You can optionally specify hooks that should be executed during the backup. For example, you may
|
||||
need to tell a database to flush its in-memory buffers to disk prior to taking a snapshot. You can
|
||||
find more information about hooks [here][11].
|
||||
|
||||
Some things to be aware of:
|
||||
* *Cluster backups are not strictly atomic.* If API objects are being created or edited at the time of backup, they may or may not be included in the backup. In practice, backups happen very quickly, so the odds of capturing inconsistent information are low, but it is still possible.
|
||||
|
||||
* *A backup usually takes no more than a few seconds.* The snapshotting process for persistent volumes is asynchronous, so the runtime of the `ark backup` command isn't dependent on disk size.
|
||||
|
||||
These ad-hoc backups are saved with the `<BACKUP NAME>` specified during creation.
|
||||
|
||||
|
||||
### 2. Schedules
|
||||
The *schedule* operation allows you to back up your data at recurring intervals. The first backup is performed when the schedule is first created, and subsequent backups happen at the schedule's specified interval. These intervals are specified by a Cron expression.
|
||||
|
||||
A Schedule acts as a wrapper for Backups; when triggered, it creates them behind the scenes.
|
||||
|
||||
Scheduled backups are saved with the name `<SCHEDULE NAME>-<TIMESTAMP>`, where `<TIMESTAMP>` is formatted as *YYYYMMDDhhmmss*.
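For example, the `--schedule` flag shown in the CLI reference above takes a cron expression, so a daily 1 AM schedule might look like this (the schedule name and selector are illustrative):

```
ark schedule create nginx-daily --schedule="0 1 * * *" --selector app=nginx
```

Backups created by this schedule are then saved as `nginx-daily-<TIMESTAMP>`.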
|
||||
|
||||
### 3. Restores
|
||||
The *restore* operation allows you to restore all of the objects and persistent volumes from a previously created Backup. Heptio Ark supports remapping multiple namespaces--for example, in a single restore, objects in namespace "abc" can be recreated under namespace "def", and objects in "123" under "456".
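A hedged sketch of what such a remapping might look like on the command line (the flag name is assumed from later releases; check `ark restore create --help` for your version):

```
ark restore create --from-backup my-backup --namespace-mappings abc:def,123:456
```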
|
||||
|
||||
Kubernetes API objects that have been restored can be identified with a label that looks like `ark-restore=<BACKUP NAME>-<TIMESTAMP>`, where `<TIMESTAMP>` is formatted as *YYYYMMDDhhmmss*.
|
||||
|
||||
You can also run the Ark server in *restore-only* mode, which disables backup, schedule, and garbage collection functionality during disaster recovery.
|
||||
|
||||
## API types
|
||||
|
||||
For information about the individual API types Ark uses, please see the [API types reference][10].
|
||||
|
||||
## Expired backup deletion
|
||||
|
||||
When first creating a backup, you can specify a TTL. If Ark sees that an existing Backup resource has expired, it removes both:
|
||||
* The Backup resource itself
|
||||
* The actual backup file from cloud object storage
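The TTL is set at backup (or schedule) creation time; the `--ttl duration` flag shown for `ark schedule create` above is assumed to be available on `ark backup create` as well. A sketch:

```
# Expire (and garbage collect) this backup 48 hours after creation; the default is 24h0m0s.
ark backup create nginx-backup --selector app=nginx --ttl 48h0m0s
```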
|
||||
|
||||
## Cloud storage sync
|
||||
|
||||
Heptio Ark treats object storage as the source of truth. It continuously checks to see that the correct Backup resources are always present. If there is a properly formatted backup file in the storage bucket, but no corresponding Backup resources in the Kubernetes API, Ark synchronizes the information from object storage to Kubernetes.
|
||||
|
||||
This allows *restore* functionality to work in a cluster migration scenario, where the original Backup objects do not exist in the new cluster. See the [use case guide][7] for details.
|
||||
|
||||
[0]: #overview
|
||||
[1]: #operation-types
|
||||
[2]: #1-backups
|
||||
[3]: #2-schedules
|
||||
[4]: #3-restores
|
||||
[5]: #expired-backup-deletion
|
||||
[6]: #cloud-storage-sync
|
||||
[7]: use-cases.md#cluster-migration
|
||||
[8]: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
|
||||
[9]: #api-types
|
||||
[10]: api-types/
|
||||
[11]: hooks.md
|
||||
@@ -1,114 +0,0 @@
|
||||
# Ark Config definition
|
||||
|
||||
* [Overview][8]
|
||||
* [Example][9]
|
||||
* [Parameter Reference][6]
|
||||
* [Main config][7]
|
||||
* [AWS][0]
|
||||
* [GCP][1]
|
||||
* [Azure][2]
|
||||
|
||||
## Overview
|
||||
|
||||
Heptio Ark defines its own Config object (a custom resource) for specifying Ark backup and cloud provider settings. When the Ark server is first deployed, it waits until you create a Config--specifically one named `default`--in the `heptio-ark` namespace.
|
||||
|
||||
> *NOTE*: There is an underlying assumption that you're running the Ark server as a Kubernetes deployment. If the `default` Config is modified, the server shuts down gracefully. Once the kubelet restarts the Ark server pod, the server then uses the updated Config values.
|
||||
|
||||
## Example
|
||||
|
||||
A sample YAML `Config` looks like the following:
|
||||
```
|
||||
apiVersion: ark.heptio.com/v1
|
||||
kind: Config
|
||||
metadata:
|
||||
namespace: heptio-ark
|
||||
name: default
|
||||
persistentVolumeProvider:
|
||||
aws:
|
||||
region: us-west-2
|
||||
backupStorageProvider:
|
||||
bucket: ark
|
||||
aws:
|
||||
region: us-west-2
|
||||
backupSyncPeriod: 60m
|
||||
gcSyncPeriod: 60m
|
||||
scheduleSyncPeriod: 1m
|
||||
restoreOnlyMode: false
|
||||
```
|
||||
|
||||
## Parameter Reference
|
||||
|
||||
The configurable parameters are as follows:
|
||||
|
||||
### Main config parameters
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `persistentVolumeProvider` | CloudProviderConfig<br><br>(Supported key values are `aws`, `gcp`, and `azure`, but only one can be present. See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs.) | None (Optional) | The specification for whichever cloud provider the cluster is using for persistent volumes (to be snapshotted), if any.<br><br>If not specified, Backups and Restores requesting PV snapshots & restores, respectively, are considered invalid. <br><br> *NOTE*: For Azure, your Kubernetes cluster needs to be version 1.7.2+ in order to support PV snapshotting of its managed disks. |
|
||||
| `backupStorageProvider`/(inline) | CloudProviderConfig<br><br>(Supported key values are `aws`, `gcp`, and `azure`, but only one can be present. See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs.) | Required Field | The specification for whichever cloud provider will be used to actually store the backups. |
|
||||
| `backupStorageProvider/bucket` | String | Required Field | The storage bucket where backups are to be uploaded. |
|
||||
| `backupSyncPeriod` | metav1.Duration | 60m0s | How frequently Ark queries the object storage to make sure that the appropriate Backup resources have been created for existing backup files. |
|
||||
| `gcSyncPeriod` | metav1.Duration | 60m0s | How frequently Ark queries the object storage to delete backup files that have passed their TTL. |
|
||||
| `scheduleSyncPeriod` | metav1.Duration | 1m0s | How frequently Ark checks its Schedule resource objects to see if a backup needs to be initiated. |
|
||||
| `resourcePriorities` | []string | `[namespaces, persistentvolumes, persistentvolumeclaims, secrets, configmaps]` | An ordered list that describes the order in which Kubernetes resource objects should be restored (also specified with the `<RESOURCE>.<GROUP>` format).<br><br>If a resource is not in this list, it is restored after all other prioritized resources. |
|
||||
| `restoreOnlyMode` | bool | `false` | When RestoreOnly mode is on, functionality for backups, schedules, and expired backup deletion is *turned off*. Restores are made from existing backup files in object storage. |
|
||||
|
||||
### AWS
|
||||
|
||||
**(Or other S3-compatible storage)**
|
||||
|
||||
#### backupStorageProvider
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `region` | string | Required Field | *Example*: "us-east-1"<br><br>See [AWS documentation][3] for the full list. |
|
||||
| `disableSSL` | bool | `false` | Set this to `true` if you are using Minio (or another local, S3-compatible storage service) and your deployment is not secured. |
|
||||
| `s3ForcePathStyle` | bool | `false` | Set this to `true` if you are using a local storage service like Minio. |
|
||||
| `s3Url` | string | Required field for non-AWS-hosted storage | *Example*: http://minio:9000<br><br>You can specify the AWS S3 URL here for explicitness, but Ark can already generate it from `region` and `bucket`. This field is primarily for local storage services like Minio. |
|
||||
| `kmsKeyId` | string | Empty | *Example*: "502b409c-4da1-419f-a16e-eif453b3i49f"<br><br>Specify an [AWS KMS key][10] id to enable encryption of the backups stored in S3. Only works with AWS S3 and may require explicitly granting key usage rights.|
|
||||
|
||||
#### persistentVolumeProvider (AWS Only)
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `region` | string | Required Field | *Example*: "us-east-1"<br><br>See [AWS documentation][3] for the full list. |
|
||||
|
||||
### GCP
|
||||
|
||||
#### backupStorageProvider
|
||||
|
||||
No parameters required; specify an empty object per [example file][11].
|
||||
|
||||
#### persistentVolumeProvider
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `project` | string | Required Field | *Example*: "project-example-3jsn23"<br><br> See the [Project ID documentation][4] for details. |
|
||||
|
||||
### Azure
|
||||
|
||||
#### backupStorageProvider
|
||||
|
||||
No parameters required; specify an empty object per [example file][12].
|
||||
|
||||
#### persistentVolumeProvider
|
||||
|
||||
| Key | Type | Default | Meaning |
|
||||
| --- | --- | --- | --- |
|
||||
| `location` | string | Required Field | *Example*: "Canada East"<br><br>See [the list of available locations][5] (note that this particular page refers to them as "Regions"). |
|
||||
| `apiTimeout` | metav1.Duration | 2m0s | How long to wait for an Azure API request to complete before timeout. |
|
||||
|
||||
[0]: #aws
|
||||
[1]: #gcp
|
||||
[2]: #azure
|
||||
[3]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions
|
||||
[4]: https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects
|
||||
[5]: https://azure.microsoft.com/en-us/regions/
|
||||
[6]: #parameter-reference
|
||||
[7]: #main-config-parameters
|
||||
[8]: #overview
|
||||
[9]: #example
|
||||
[10]: http://docs.aws.amazon.com/kms/latest/developerguide/overview.html
|
||||
[11]: ../examples/gcp/00-ark-config.yaml
|
||||
[12]: ../examples/azure/10-ark-config.yaml
|
||||
|
||||
docs/debugging-install.md (new file, 59 lines)
@@ -0,0 +1,59 @@
|
||||
# Debugging Installation Issues
|
||||
|
||||
## General
|
||||
|
||||
### `invalid configuration: no configuration has been provided`
|
||||
This typically means that no `kubeconfig` file can be found for the Ark client to use. Ark looks for a kubeconfig in the
|
||||
following locations:
|
||||
* the path specified by the `--kubeconfig` flag, if any
|
||||
* the path specified by the `$KUBECONFIG` environment variable, if any
|
||||
* `~/.kube/config`
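For example (a sketch; any Ark client command accepts the inherited `--kubeconfig` flag shown in the CLI reference):

```
# Point the Ark client at an explicit kubeconfig for a single command ...
ark backup get --kubeconfig /path/to/kubeconfig

# ... or export it once for the shell session.
export KUBECONFIG=/path/to/kubeconfig
ark backup get
```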
|
||||
|
||||
### Backups or restores stuck in `New` phase
|
||||
This means that the Ark controllers are not processing the backups/restores, which usually happens because the Ark server is not running. Check the pod description and logs for errors:
|
||||
```
|
||||
kubectl -n heptio-ark describe pods
|
||||
kubectl -n heptio-ark logs deployment/ark
|
||||
```
|
||||
|
||||
|
||||
## AWS
|
||||
|
||||
### `NoCredentialProviders: no valid providers in chain`
|
||||
This means that the secret containing the AWS IAM user credentials for Ark has not been created/mounted properly
|
||||
into the Ark server pod. Ensure the following:
|
||||
* The `cloud-credentials` secret exists in the Ark server's namespace
|
||||
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-ark` file
|
||||
* The `credentials-ark` file is formatted properly and has the correct values:
|
||||
|
||||
```
|
||||
[default]
|
||||
aws_access_key_id=<your AWS access key ID>
|
||||
aws_secret_access_key=<your AWS secret access key>
|
||||
```
|
||||
* The `cloud-credentials` secret is defined as a volume for the Ark deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials`
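One quick way to check the first three items (a sketch; assumes a GNU-style `base64` and the secret layout described above):

```
# Print the decoded contents of the `cloud` key in the cloud-credentials secret.
kubectl -n heptio-ark get secret cloud-credentials -o jsonpath='{.data.cloud}' | base64 --decode
```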
|
||||
|
||||
|
||||
## Azure
|
||||
|
||||
### `Failed to refresh the Token` or `adal: Refresh request failed`
|
||||
This means that the secret containing the Azure service principal credentials for Ark has not been created/mounted
|
||||
properly into the Ark server pod. Ensure the following:
|
||||
* The `cloud-credentials` secret exists in the Ark server's namespace
|
||||
* The `cloud-credentials` secret has all of the expected keys and each one has the correct value (see the [setup instructions][0])
|
||||
* The `cloud-credentials` secret is defined as a volume for the Ark deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials`
|
||||
|
||||
|
||||
## GCE/GKE
|
||||
|
||||
### `open credentials/cloud: no such file or directory`
|
||||
This means that the secret containing the GCE service account credentials for Ark has not been created/mounted properly
|
||||
into the Ark server pod. Ensure the following:
|
||||
* The `cloud-credentials` secret exists in the Ark server's namespace
|
||||
* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-ark` file
|
||||
* The `cloud-credentials` secret is defined as a volume for the Ark deployment
|
||||
* The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials`
|
||||
|
||||
[0]: azure-config.md#credentials-and-configuration
|
||||
@@ -15,37 +15,89 @@ backup-test-2-20170726180514 backup-test-2 Completed 0 0 2
|
||||
backup-test-2-20170726180515 backup-test-2 Completed 0 1 2017-07-26 13:32:59 -0400 EDT <none>
|
||||
```
|
||||
|
||||
To delve into the warnings and errors into more detail, you can use the `-o` option:
|
||||
To delve into the warnings and errors in more detail, you can use `ark restore describe`:
|
||||
```
|
||||
kubectl restore get backup-test-20170726180512 -o yaml
|
||||
ark restore describe backup-test-20170726180512
|
||||
```
|
||||
The output YAML has a `status` field which may look like the following:
|
||||
The output looks like this:
|
||||
```
|
||||
status:
|
||||
errors:
|
||||
ark: null
|
||||
cluster: null
|
||||
namespaces: null
|
||||
phase: Completed
|
||||
validationErrors: null
|
||||
warnings:
|
||||
ark: null
|
||||
cluster: null
|
||||
namespaces:
|
||||
cm1:
|
||||
- secrets "default-token-t0slk" already exists
|
||||
Name: backup-test-20170726180512
|
||||
Namespace: heptio-ark
|
||||
Labels: <none>
|
||||
Annotations: <none>
|
||||
|
||||
Backup: backup-test
|
||||
|
||||
Namespaces:
|
||||
Included: *
|
||||
Excluded: <none>
|
||||
|
||||
Resources:
|
||||
Included: serviceaccounts
|
||||
Excluded: nodes, events, events.events.k8s.io
|
||||
Cluster-scoped: auto
|
||||
|
||||
Namespace mappings: <none>
|
||||
|
||||
Label selector: <none>
|
||||
|
||||
Restore PVs: auto
|
||||
|
||||
Phase: Completed
|
||||
|
||||
Validation errors: <none>
|
||||
|
||||
Warnings:
|
||||
Ark: <none>
|
||||
Cluster: <none>
|
||||
Namespaces:
|
||||
heptio-ark: serviceaccounts "ark" already exists
|
||||
serviceaccounts "default" already exists
|
||||
kube-public: serviceaccounts "default" already exists
|
||||
kube-system: serviceaccounts "attachdetach-controller" already exists
|
||||
serviceaccounts "certificate-controller" already exists
|
||||
serviceaccounts "cronjob-controller" already exists
|
||||
serviceaccounts "daemon-set-controller" already exists
|
||||
serviceaccounts "default" already exists
|
||||
serviceaccounts "deployment-controller" already exists
|
||||
serviceaccounts "disruption-controller" already exists
|
||||
serviceaccounts "endpoint-controller" already exists
|
||||
serviceaccounts "generic-garbage-collector" already exists
|
||||
serviceaccounts "horizontal-pod-autoscaler" already exists
|
||||
serviceaccounts "job-controller" already exists
|
||||
serviceaccounts "kube-dns" already exists
|
||||
serviceaccounts "namespace-controller" already exists
|
||||
serviceaccounts "node-controller" already exists
|
||||
serviceaccounts "persistent-volume-binder" already exists
|
||||
serviceaccounts "pod-garbage-collector" already exists
|
||||
serviceaccounts "replicaset-controller" already exists
|
||||
serviceaccounts "replication-controller" already exists
|
||||
serviceaccounts "resourcequota-controller" already exists
|
||||
serviceaccounts "service-account-controller" already exists
|
||||
serviceaccounts "service-controller" already exists
|
||||
serviceaccounts "statefulset-controller" already exists
|
||||
serviceaccounts "ttl-controller" already exists
|
||||
default: serviceaccounts "default" already exists
|
||||
|
||||
Errors:
|
||||
Ark: <none>
|
||||
Cluster: <none>
|
||||
Namespaces: <none>
|
||||
```
|
||||
|
||||
## Structure
|
||||
The `status` field in a Restore's YAML has subfields for `errors` and `warnings`. `errors` appear for incomplete or partial restores. `warnings` appear for non-blocking issues (e.g. the restore looks "normal" and all resources referenced in the backup exist in some form, although some of them may have been pre-existing).
|
||||
|
||||
Both `errors` and `warnings` are structured in the same way:
|
||||
Errors appear for incomplete or partial restores. Warnings appear for non-blocking issues (e.g. the
|
||||
restore looks "normal" and all resources referenced in the backup exist in some form, although some
|
||||
of them may have been pre-existing).
|
||||
|
||||
* `ark`: A list of system-related issues encountered by the Ark server (e.g. couldn't read directory).
|
||||
Both errors and warnings are structured in the same way:
|
||||
|
||||
* `cluster`: A list of issues related to the restore of cluster-scoped resources.
|
||||
* `Ark`: A list of system-related issues encountered by the Ark server (e.g. couldn't read directory).
|
||||
|
||||
* `namespaces`: A map of namespaces to the list of issues related to the restore of their respective resources.
|
||||
* `Cluster`: A list of issues related to the restore of cluster-scoped resources.
|
||||
|
||||
* `Namespaces`: A map of namespaces to the list of issues related to the restore of their respective resources.
|
||||
|
||||
[0]: #example
|
||||
[1]: #structure
|
||||
|
||||
docs/disaster-case.md (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
# Disaster recovery
|
||||
|
||||
*Using Schedules and Restore-Only Mode*
|
||||
|
||||
If you periodically back up your cluster's resources, you are able to return to a previous state in case of some unexpected mishap, such as a service outage. Doing so with Heptio Ark looks like the following:
|
||||
|
||||
1. After you first run the Ark server on your cluster, set up a daily backup (replacing `<SCHEDULE NAME>` in the command as desired):
|
||||
|
||||
```
|
||||
ark schedule create <SCHEDULE NAME> --schedule "0 7 * * *"
|
||||
```
|
||||
This creates a Backup object with the name `<SCHEDULE NAME>-<TIMESTAMP>`.
|
||||
|
||||
1. A disaster happens and you need to recreate your resources.
|
||||
|
||||
1. Update the Ark server deployment, adding the `--restore-only` flag to the `server` command's arguments (see the sketch below). This prevents Backup objects from being created or deleted during your Restore process.
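A possible way to do this (a sketch; assumes the default deployment name `ark` in the `heptio-ark` namespace):
```
# Open the Ark deployment for editing and add --restore-only to the server container's args:
kubectl -n heptio-ark edit deployment/ark
#
#   containers:
#   - args:
#     - server
#     - --restore-only
```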
|
||||
|
||||
1. Create a restore with your most recent Ark Backup:
|
||||
```
|
||||
ark restore create --from-backup <SCHEDULE NAME>-<TIMESTAMP>
|
||||
```
|
||||
|
||||
|
||||
|
||||
docs/expose-minio.md (new file, 50 lines)
@@ -0,0 +1,50 @@
|
||||
# Expose Minio outside your cluster
|
||||
|
||||
When you run commands to get logs or describe a backup, the Ark server generates a pre-signed URL to download the requested items. To access these URLs from outside the cluster -- that is, from your Ark client -- you need to make Minio available outside the cluster. You can:
|
||||
|
||||
- Change the Minio Service type from `ClusterIP` to `NodePort`.
|
||||
- Set up Ingress for your cluster, keeping Minio Service type `ClusterIP`.
|
||||
|
||||
In Ark 0.10, you can also specify the value of a new `publicUrl` field for the pre-signed URL in your backup storage config.
|
||||
|
||||
For basic instructions on how to install the Ark server and client, see [the getting started example][1].
|
||||
|
||||
## Expose Minio with Service of type NodePort
|
||||
|
||||
The Minio deployment by default specifies a Service of type `ClusterIP`. You can change this to `NodePort` to easily expose a cluster service externally if you can reach the node from your Ark client.
|
||||
|
||||
You must also get the Minio URL, which you can then specify as the value of the new `publicUrl` field in your backup storage config.
|
||||
|
||||
1. In `examples/minio/00-minio-deployment.yaml`, change the value of Service `spec.type` from `ClusterIP` to `NodePort`.
|
||||
|
||||
1. Get the Minio URL:
|
||||
|
||||
- if you're running Minikube:
|
||||
|
||||
```shell
|
||||
minikube service minio --namespace=heptio-ark --url
|
||||
```
|
||||
|
||||
- in any other environment:
|
||||
|
||||
1. Get the value of an external IP address or DNS name of any node in your cluster. You must be able to reach this address from the Ark client.
|
||||
|
||||
1. Append the value of the NodePort to get a complete URL. You can get this value by running:
|
||||
|
||||
```shell
|
||||
kubectl -n heptio-ark get svc/minio -o jsonpath='{.spec.ports[0].nodePort}'
|
||||
```
|
||||
|
||||
1. In `examples/minio/05-ark-backupstoragelocation.yaml`, uncomment the `publicUrl` line and provide this Minio URL as the value of the `publicUrl` field. You must include the `http://` or `https://` prefix.
|
||||
|
||||
## Work with Ingress
|
||||
|
||||
Configuring Ingress for your cluster is out of scope for the Ark documentation. If you have already set up Ingress, however, it makes sense to continue with it while you run the example Ark configuration with Minio.
|
||||
|
||||
In this case:
|
||||
|
||||
1. Keep the Service type as `ClusterIP`.
|
||||
|
||||
1. In `examples/minio/05-ark-backupstoragelocation.yaml`, uncomment the `publicUrl` line and provide the URL and port of your Ingress as the value of the `publicUrl` field.
|
||||
|
||||
[1]: get-started.md
|
||||
docs/extend.md (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
# Extend Ark
|
||||
|
||||
Ark includes mechanisms for extending the core functionality to meet your individual backup/restore needs:
|
||||
|
||||
* [Hooks][27] allow you to specify commands to be executed within running pods during a backup. This is useful if you need to run a workload-specific command prior to taking a backup (for example, to flush disk buffers or to freeze a database).
|
||||
* [Plugins][28] allow you to develop custom object/block storage back-ends or per-item backup/restore actions that can execute arbitrary logic, including modifying the items being backed up/restored. Plugins can be used by Ark without needing to be compiled into the core Ark binary.
|
||||
|
||||
[27]: hooks.md
|
||||
[28]: plugins.md
|
||||
docs/faq.md (16 lines changed)
@@ -3,7 +3,7 @@
|
||||
## When is it appropriate to use Ark instead of etcd's built in backup/restore?
|
||||
|
||||
Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For
|
||||
example, it is a good idea to take a backup of etcd prior to upgrading etcd istelf. For more
|
||||
example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more
|
||||
sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is
|
||||
generally a better approach. It gives you the ability to throw away an unstable cluster and restore
|
||||
your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up
|
||||
@@ -22,4 +22,16 @@ Examples of cases where Ark is useful:
|
||||
|
||||
Yes, with some exceptions. For example, when Ark restores pods it deletes the `nodeName` from the
|
||||
pod so that it can be scheduled onto a new node. You can see some more examples of the differences
|
||||
in [pod_restorer.go](https://github.com/heptio/ark/blob/master/pkg/restore/restorers/pod_restorer.go)
|
||||
in [pod_action.go](https://github.com/heptio/ark/blob/master/pkg/restore/pod_action.go)
|
||||
|
||||
## I'm using Ark in multiple clusters. Should I use the same bucket to store all of my backups?
|
||||
|
||||
We **strongly** recommend that you use a separate bucket per cluster to store backups. Sharing a bucket
|
||||
across multiple Ark instances can lead to numerous problems - failed backups, overwritten backups,
|
||||
inadvertently deleted backups, etc., all of which can be avoided by using a separate bucket per Ark
|
||||
instance.
|
||||
|
||||
Related to this, if you need to restore a backup from cluster A into cluster B, please use restore-only
|
||||
mode in cluster B's Ark instance (via the `--restore-only` flag on the `ark server` command specified
|
||||
in your Ark deployment) while it's configured to use cluster A's bucket. This will ensure no
|
||||
new backups are created, and no existing backups are deleted or overwritten.
|
||||
|
||||
docs/gcp-config.md (new file, 142 lines)
@@ -0,0 +1,142 @@
|
||||
# Run Ark on GCP
|
||||
|
||||
You can run Kubernetes on Google Cloud Platform in either:
|
||||
|
||||
* Kubernetes on Google Compute Engine virtual machines
|
||||
* Google Kubernetes Engine
|
||||
|
||||
If you do not have the `gcloud` and `gsutil` CLIs locally installed, follow the [user guide][16] to set them up.
|
||||
|
||||
## Create GCS bucket
|
||||
|
||||
Heptio Ark requires an object storage bucket in which to store backups, preferably unique to a single Kubernetes cluster (see the [FAQ][20] for more details). Create a GCS bucket, replacing the `<YOUR_BUCKET>` placeholder with the name of your bucket:
|
||||
|
||||
```bash
|
||||
BUCKET=<YOUR_BUCKET>
|
||||
|
||||
gsutil mb gs://$BUCKET/
|
||||
```
|
||||
|
||||
## Create service account
|
||||
|
||||
To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]:
|
||||
|
||||
1. View your current config settings:
|
||||
|
||||
```bash
|
||||
gcloud config list
|
||||
```
|
||||
|
||||
Store the `project` value from the results in the environment variable `$PROJECT_ID`.
|
||||
|
||||
```bash
|
||||
PROJECT_ID=$(gcloud config get-value project)
|
||||
```
|
||||
|
||||
2. Create a service account:
|
||||
|
||||
```bash
|
||||
gcloud iam service-accounts create heptio-ark \
|
||||
--display-name "Heptio Ark service account"
|
||||
```
|
||||
|
||||
> If you'll be using Ark to back up multiple clusters with multiple GCS buckets, it may be desirable to create a unique username per cluster rather than the default `heptio-ark`.
|
||||
|
||||
Then list all accounts and find the `heptio-ark` account you just created:
|
||||
```bash
|
||||
gcloud iam service-accounts list
|
||||
```
|
||||
|
||||
Set the `$SERVICE_ACCOUNT_EMAIL` variable to match its `email` value.
|
||||
|
||||
```bash
|
||||
SERVICE_ACCOUNT_EMAIL=$(gcloud iam service-accounts list \
|
||||
--filter="displayName:Heptio Ark service account" \
|
||||
--format 'value(email)')
|
||||
```
|
||||
|
||||
3. Attach policies to give `heptio-ark` the necessary permissions to function:
|
||||
|
||||
```bash
|
||||
|
||||
ROLE_PERMISSIONS=(
|
||||
compute.disks.get
|
||||
compute.disks.create
|
||||
compute.disks.createSnapshot
|
||||
compute.snapshots.get
|
||||
compute.snapshots.create
|
||||
compute.snapshots.useReadOnly
|
||||
compute.snapshots.delete
|
||||
)
|
||||
|
||||
gcloud iam roles create heptio_ark.server \
|
||||
--project $PROJECT_ID \
|
||||
--title "Heptio Ark Server" \
|
||||
--permissions "$(IFS=","; echo "${ROLE_PERMISSIONS[*]}")"
|
||||
|
||||
gcloud projects add-iam-policy-binding $PROJECT_ID \
|
||||
--member serviceAccount:$SERVICE_ACCOUNT_EMAIL \
|
||||
--role projects/$PROJECT_ID/roles/heptio_ark.server
|
||||
|
||||
gsutil iam ch serviceAccount:$SERVICE_ACCOUNT_EMAIL:objectAdmin gs://${BUCKET}
|
||||
```
|
||||
|
||||
4. Create a service account key, specifying an output file (`credentials-ark`) in your local directory:
|
||||
|
||||
```bash
|
||||
gcloud iam service-accounts keys create credentials-ark \
|
||||
--iam-account $SERVICE_ACCOUNT_EMAIL
|
||||
```
|
||||
|
||||
## Credentials and configuration
|
||||
|
||||
If you run Google Kubernetes Engine (GKE), make sure that your current IAM user is a cluster-admin. This role is required to create RBAC objects.
|
||||
See [the GKE documentation][22] for more information.
|
||||
|
||||
In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0].
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
```
|
||||
|
||||
Create a Secret. In the directory of the credentials file you just created, run:
|
||||
|
||||
```bash
|
||||
kubectl create secret generic cloud-credentials \
|
||||
--namespace heptio-ark \
|
||||
--from-file cloud=credentials-ark
|
||||
```
|
||||
|
||||
**Note: If you use a custom namespace, replace `heptio-ark` with the name of the custom namespace**
|
||||
|
||||
Specify the following values in the example files:
|
||||
|
||||
* In file `config/gcp/05-ark-backupstoragelocation.yaml`:
|
||||
|
||||
* Replace `<YOUR_BUCKET>`. See the [BackupStorageLocation definition][7] for details.
|
||||
|
||||
* (Optional) If you run the nginx example, in file `config/nginx-app/with-pv.yaml`:
|
||||
|
||||
* Replace `<YOUR_STORAGE_CLASS_NAME>` with `standard`. This is GCP's default `StorageClass` name.
|
||||
|
||||
* (Optional, use only if you need to specify multiple volume snapshot locations) In `config/gcp/10-deployment.yaml`:
|
||||
|
||||
* Uncomment the `--default-volume-snapshot-locations` and replace provider locations with the values for your environment.
|
||||
|
||||
## Start the server
|
||||
|
||||
In the root of your Ark directory, run:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/gcp/05-ark-backupstoragelocation.yaml
|
||||
kubectl apply -f config/gcp/06-ark-volumesnapshotlocation.yaml
|
||||
kubectl apply -f config/gcp/10-deployment.yaml
|
||||
```
|
||||
|
||||
[0]: namespace.md
|
||||
[7]: api-types/backupstoragelocation.md#gcp
|
||||
[15]: https://cloud.google.com/compute/docs/access/service-accounts
|
||||
[16]: https://cloud.google.com/sdk/docs/
|
||||
[20]: faq.md
|
||||
[22]: https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#prerequisites_for_using_role-based_access_control
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 Heptio Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/heptio/ark/pkg/cmd/ark"
|
||||
"github.com/spf13/cobra/doc"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cmdName := os.Args[1]
|
||||
outputDir := os.Args[2]
|
||||
|
||||
cmd := ark.NewCommand(cmdName)
|
||||
// Remove auto-generated timestamps
|
||||
cmd.DisableAutoGenTag = true
|
||||
|
||||
err := doc.GenMarkdownTree(cmd, outputDir)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
docs/get-started.md (new file, 175 lines)
@@ -0,0 +1,175 @@
|
||||
## Getting started
|
||||
|
||||
The following example sets up the Ark server and client, then backs up and restores a sample application.
|
||||
|
||||
For simplicity, the example uses Minio, an S3-compatible storage service that runs locally on your cluster.
|
||||
For additional functionality with this setup, see the docs on how to [expose Minio outside your cluster][31].
|
||||
|
||||
**NOTE** The example lets you explore basic Ark functionality. Configuring Minio for production is out of scope.
|
||||
|
||||
See [Set up Ark on your platform][3] for how to configure Ark for a production environment.
|
||||
|
||||
If you encounter issues with installing or configuring, see [Debugging Installation Issues](debugging-install.md).
|
||||
|
||||
### Prerequisites
|
||||
|
||||
* Access to a Kubernetes cluster, version 1.7 or later. Version 1.7.5 or later is required to run `ark backup delete`.
|
||||
* A DNS server on the cluster
|
||||
* `kubectl` installed
|
||||
|
||||
### Download
|
||||
|
||||
1. Download the [latest release's][26] tarball for your platform.
|
||||
|
||||
1. Extract the tarball:
|
||||
```bash
|
||||
tar -xzf <RELEASE-TARBALL-NAME>.tar.gz -C /dir/to/extract/to
|
||||
```
|
||||
We'll refer to the directory you extracted to as the "Ark directory" in subsequent steps.
|
||||
|
||||
1. Move the `ark` binary from the Ark directory to somewhere in your PATH.
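For example (a sketch; assumes `/usr/local/bin` is on your PATH and that you extracted to `/dir/to/extract/to` as above):
```bash
sudo mv /dir/to/extract/to/ark /usr/local/bin/ark
```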
|
||||
|
||||
#### MacOS Installation
|
||||
|
||||
On Mac, you can use [HomeBrew](https://brew.sh) to install the `ark` client:
|
||||
```bash
|
||||
brew install ark
|
||||
```
|
||||
|
||||
### Set up server
|
||||
|
||||
These instructions start the Ark server and a Minio instance that is accessible from within the cluster only. See [Expose Minio outside your cluster][31] for information about configuring your cluster for outside access to Minio. Outside access is required to access logs and run `ark describe` commands.
|
||||
|
||||
1. Start the server and the local storage service. In the Ark directory, run:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
kubectl apply -f config/minio/
|
||||
```
|
||||
|
||||
1. Deploy the example nginx application:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/nginx-app/base.yaml
|
||||
```
|
||||
|
||||
1. Check to see that both the Ark and nginx deployments are successfully created:
|
||||
|
||||
```
|
||||
kubectl get deployments -l component=ark --namespace=heptio-ark
|
||||
kubectl get deployments --namespace=nginx-example
|
||||
```
|
||||
|
||||
### Back up
|
||||
|
||||
1. Create a backup for any object that matches the `app=nginx` label selector:
|
||||
|
||||
```
|
||||
ark backup create nginx-backup --selector app=nginx
|
||||
```
|
||||
|
||||
Alternatively, if you want to back up all objects *except* those matching the label `backup=ignore`:
|
||||
|
||||
```
|
||||
ark backup create nginx-backup --selector 'backup notin (ignore)'
|
||||
```
|
||||
|
||||
1. (Optional) Create regularly scheduled backups based on a cron expression using the `app=nginx` label selector:
|
||||
|
||||
```
|
||||
ark schedule create nginx-daily --schedule="0 1 * * *" --selector app=nginx
|
||||
```
|
||||
|
||||
Alternatively, you can use some non-standard shorthand cron expressions:
|
||||
|
||||
```
|
||||
ark schedule create nginx-daily --schedule="@daily" --selector app=nginx
|
||||
```
|
||||
|
||||
See the [cron package's documentation][30] for more usage examples.
|
||||
|
||||
1. Simulate a disaster:
|
||||
|
||||
```
|
||||
kubectl delete namespace nginx-example
|
||||
```
|
||||
|
||||
1. To check that the nginx deployment and service are gone, run:
|
||||
|
||||
```
|
||||
kubectl get deployments --namespace=nginx-example
|
||||
kubectl get services --namespace=nginx-example
|
||||
kubectl get namespace/nginx-example
|
||||
```
|
||||
|
||||
You should get no results.
|
||||
|
||||
NOTE: You might need to wait for a few minutes for the namespace to be fully cleaned up.
|
||||
|
||||
### Restore
|
||||
|
||||
1. Run:
|
||||
|
||||
```
|
||||
ark restore create --from-backup nginx-backup
|
||||
```
|
||||
|
||||
1. Run:
|
||||
|
||||
```
|
||||
ark restore get
|
||||
```
|
||||
|
||||
After the restore finishes, the output looks like the following:
|
||||
|
||||
```
|
||||
NAME BACKUP STATUS WARNINGS ERRORS CREATED SELECTOR
|
||||
nginx-backup-20170727200524 nginx-backup Completed 0 0 2017-07-27 20:05:24 +0000 UTC <none>
|
||||
```
|
||||
|
||||
NOTE: The restore can take a few moments to finish. During this time, the `STATUS` column reads `InProgress`.
|
||||
|
||||
After a successful restore, the `STATUS` column is `Completed`, and `WARNINGS` and `ERRORS` are 0. All objects in the `nginx-example` namespace should be just as they were before you deleted them.
|
||||
|
||||
If there are errors or warnings, you can look at them in detail:
|
||||
|
||||
```
|
||||
ark restore describe <RESTORE_NAME>
|
||||
```
|
||||
|
||||
For more information, see [the debugging information][18].
|
||||
|
||||
### Clean up
|
||||
|
||||
If you want to delete any backups you created, including data in object storage and persistent
|
||||
volume snapshots, you can run:
|
||||
|
||||
```
|
||||
ark backup delete BACKUP_NAME
|
||||
```
|
||||
|
||||
This asks the Ark server to delete all backup data associated with `BACKUP_NAME`. You need to do
|
||||
this for each backup you want to permanently delete. A future version of Ark will allow you to
|
||||
delete multiple backups by name or label selector.
|
||||
|
||||
Once fully removed, the backup is no longer visible when you run:
|
||||
|
||||
```
|
||||
ark backup get BACKUP_NAME
|
||||
```
|
||||
|
||||
If you want to uninstall Ark but preserve the backup data in object storage and persistent volume
|
||||
snapshots, it is safe to remove the `heptio-ark` namespace and everything else created for this
|
||||
example:
|
||||
|
||||
```
|
||||
kubectl delete -f config/common/
|
||||
kubectl delete -f config/minio/
|
||||
kubectl delete -f config/nginx-app/base.yaml
|
||||
```
|
||||
|
||||
[31]: expose-minio.md
|
||||
[3]: install-overview.md
|
||||
[18]: debugging-restores.md
|
||||
[26]: https://github.com/heptio/ark/releases
|
||||
[30]: https://godoc.org/github.com/robfig/cron
|
||||
@@ -5,23 +5,79 @@ Heptio Ark currently supports executing commands in containers in pods during a
|
||||
## Backup Hooks
|
||||
|
||||
When performing a backup, you can specify one or more commands to execute in a container in a pod
|
||||
when that pod is being backed up. There are two ways to specify hooks: annotations on the pod
|
||||
itself, and in the Backup spec.
|
||||
when that pod is being backed up.
|
||||
|
||||
Ark versions prior to v0.7.0 only support hooks that execute prior to any custom action processing
|
||||
("pre" hooks).
|
||||
|
||||
As of version v0.7.0, Ark also supports "post" hooks - these execute after all custom actions have
|
||||
completed, as well as after all the additional items specified by custom actions have been backed
|
||||
up.
|
||||
|
||||
There are two ways to specify hooks: annotations on the pod itself, and in the Backup spec.
|
||||
|
||||
### Specifying Hooks As Pod Annotations
|
||||
|
||||
You can use the following annotations on a pod to make Ark execute a hook when backing up the pod:
|
||||
|
||||
#### Pre hooks
|
||||
|
||||
| Annotation Name | Description |
|
||||
| --- | --- |
|
||||
| `hook.backup.ark.heptio.com/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
|
||||
| `hook.backup.ark.heptio.com/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
|
||||
| `hook.backup.ark.heptio.com/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
|
||||
| `hook.backup.ark.heptio.com/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |
|
||||
| `pre.hook.backup.ark.heptio.com/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
|
||||
| `pre.hook.backup.ark.heptio.com/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
|
||||
| `pre.hook.backup.ark.heptio.com/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
|
||||
| `pre.hook.backup.ark.heptio.com/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |
|
||||
|
||||
Ark v0.7.0+ continues to support the original (deprecated) way to specify pre hooks - without the
|
||||
`pre.` prefix in the annotation names (e.g. `hook.backup.ark.heptio.com/container`).
|
||||
|
||||
#### Post hooks (v0.7.0+)
|
||||
|
||||
| Annotation Name | Description |
|
||||
| --- | --- |
|
||||
| `post.hook.backup.ark.heptio.com/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. |
|
||||
| `post.hook.backup.ark.heptio.com/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` |
|
||||
| `post.hook.backup.ark.heptio.com/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. |
|
||||
| `post.hook.backup.ark.heptio.com/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. |
|
||||
|
||||
### Specifying Hooks in the Backup Spec
|
||||
|
||||
Please see the documentation on the [Backup API Type][1] for how to specify hooks in the Backup
|
||||
spec.
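The general shape is a list of hook definitions under `spec.hooks.resources`, each with namespace/resource/label selectors and `pre`/`post` exec actions. Below is a minimal sketch only, using assumed field names; treat the [Backup API Type][1] documentation as authoritative.

```yaml
# Rough sketch of in-spec hooks; confirm field names against the Backup API Type doc.
apiVersion: ark.heptio.com/v1
kind: Backup
metadata:
  name: backup-with-hooks        # illustrative name
  namespace: heptio-ark
spec:
  includedNamespaces:
  - nginx-example
  hooks:
    resources:
    - name: my-hook              # illustrative hook name
      includedNamespaces:
      - nginx-example
      labelSelector:
        matchLabels:
          app: nginx
      pre:
      - exec:
          container: nginx
          command: ["/usr/bin/uname", "-a"]
          onError: Fail
          timeout: 30s
      post:
      - exec:
          container: nginx
          command: ["/usr/bin/uname", "-a"]
```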
|
||||
|
||||
## Hook Example with fsfreeze
|
||||
|
||||
We are going to walk through using both pre and post hooks for freezing a file system. Freezing the
|
||||
file system is useful to ensure that all pending disk I/O operations have completed prior to taking a snapshot.
|
||||
|
||||
We will be using [examples/nginx-app/with-pv.yaml][2] for this example. Follow the [steps for your provider][3] to
|
||||
set up this example.
|
||||
|
||||
### Annotations
|
||||
|
||||
The Ark [example/nginx-app/with-pv.yaml][2] serves as an example of adding the pre and post hook annotations directly
|
||||
to your declarative deployment. Below is an example of what updating an object in place might look like.
|
||||
|
||||
```shell
|
||||
kubectl annotate pod -n nginx-example -l app=nginx \
|
||||
pre.hook.backup.ark.heptio.com/command='["/sbin/fsfreeze", "--freeze", "/var/log/nginx"]' \
|
||||
pre.hook.backup.ark.heptio.com/container=fsfreeze \
|
||||
post.hook.backup.ark.heptio.com/command='["/sbin/fsfreeze", "--unfreeze", "/var/log/nginx"]' \
|
||||
post.hook.backup.ark.heptio.com/container=fsfreeze
|
||||
```
|
||||
|
||||
Now test the pre and post hooks by creating a backup. You can use the Ark logs to verify that the pre and post
|
||||
hooks are running and exiting without error.
|
||||
|
||||
```shell
|
||||
ark backup create nginx-hook-test
|
||||
|
||||
ark backup get nginx-hook-test
|
||||
ark backup logs nginx-hook-test | grep hookCommand
|
||||
```
|
||||
|
||||
|
||||
[1]: api-types/backup.md
|
||||
[2]: examples/nginx-app/with-pv.yaml
|
||||
[3]: cloud-common.md
|
||||
|
||||
80 docs/ibm-config.md Normal file
@@ -0,0 +1,80 @@
|
||||
# Use IBM Cloud Object Storage as Ark's storage destination
You can deploy Ark on IBM [Public][5] or [Private][4] clouds, or on any other Kubernetes cluster, and still use IBM Cloud Object Storage as the destination for Ark's backups.
|
||||
|
||||
To set up IBM Cloud Object Storage (COS) as Ark's destination, you:
|
||||
|
||||
* Create your COS instance
|
||||
* Create an S3 bucket
|
||||
* Define a service that can store data in the bucket
|
||||
* Configure and start the Ark server
|
||||
|
||||
|
||||
## Create COS instance
|
||||
If you don’t have a COS instance, you can create a new one, according to the detailed instructions in [Creating a new resource instance][1].
|
||||
|
||||
## Create an S3 bucket
|
||||
Heptio Ark requires an object storage bucket to store backups in. See instructions in [Create some buckets to store your data][2].
|
||||
|
||||
## Define a service that can store data in the bucket
The process of creating service credentials is described in [Service credentials][3].
A few notes:
|
||||
|
||||
1. The Ark service will write its backup into the bucket, so it requires the “Writer” access role.
|
||||
|
||||
2. Ark uses an AWS S3-compatible API, which means it authenticates using a signature created from a pair of access and secret keys (a set of HMAC credentials). You can create these HMAC credentials by specifying `{"HMAC":true}` as an optional inline parameter. See step 3 in the [Service credentials][3] guide.
|
||||
|
||||
3. After successfully creating a Service credential, you can view the JSON definition of the credential. Under the `cos_hmac_keys` entry there are `access_key_id` and `secret_access_key`. We will use them in the next step.
|
||||
|
||||
4. Create an Ark-specific credentials file (`credentials-ark`) in your local directory:
|
||||
|
||||
```
|
||||
[default]
|
||||
aws_access_key_id=<ACCESS_KEY_ID>
|
||||
aws_secret_access_key=<SECRET_ACCESS_KEY>
|
||||
```
|
||||
|
||||
where the access key ID and secret access key are the values from the service credential you created above.
|
||||
|
||||
## Credentials and configuration
|
||||
|
||||
In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0].
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
```
|
||||
|
||||
Create a Secret. In the directory of the credentials file you just created, run:
|
||||
|
||||
```bash
|
||||
kubectl create secret generic cloud-credentials \
|
||||
--namespace <ARK_NAMESPACE> \
|
||||
--from-file cloud=credentials-ark
|
||||
```
|
||||
|
||||
Specify the following values in the example files:
|
||||
|
||||
* In `config/ibm/05-ark-backupstoragelocation.yaml`:
|
||||
|
||||
* Replace `<YOUR_BUCKET>`, `<YOUR_REGION>`, and `<YOUR_URL_ACCESS_POINT>`. See the [BackupStorageLocation definition][6] for details, and the filled-in sketch after this list.
|
||||
|
||||
* (Optional) If you run the nginx example, in file `config/nginx-app/with-pv.yaml`:
|
||||
|
||||
* Replace `<YOUR_STORAGE_CLASS_NAME>` with your `StorageClass` name.
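For reference, a filled-in `BackupStorageLocation` for an S3-compatible endpoint such as COS might look roughly like the sketch below. The `s3Url` and `s3ForcePathStyle` keys are the usual settings for S3-compatible services, but confirm the exact field names against the [BackupStorageLocation definition][6].

```yaml
apiVersion: ark.heptio.com/v1
kind: BackupStorageLocation
metadata:
  name: default
  namespace: heptio-ark
spec:
  provider: aws
  objectStorage:
    bucket: <YOUR_BUCKET>
  config:
    region: <YOUR_REGION>
    s3ForcePathStyle: "true"
    s3Url: <YOUR_URL_ACCESS_POINT>
```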
|
||||
|
||||
## Start the Ark server
|
||||
|
||||
In the root of your Ark directory, run:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/ibm/05-ark-backupstoragelocation.yaml
|
||||
kubectl apply -f config/ibm/10-deployment.yaml
|
||||
```
|
||||
|
||||
[0]: namespace.md
|
||||
[1]: https://console.bluemix.net/docs/services/cloud-object-storage/basics/order-storage.html#creating-a-new-resource-instance
|
||||
[2]: https://console.bluemix.net/docs/services/cloud-object-storage/getting-started.html#create-buckets
|
||||
[3]: https://console.bluemix.net/docs/services/cloud-object-storage/iam/service-credentials.html#service-credentials
|
||||
[4]: https://www.ibm.com/support/knowledgecenter/SSBS6K_2.1.0/kc_welcome_containers.html
|
||||
[5]: https://console.bluemix.net/docs/containers/container_index.html#container_index
|
||||
[6]: api-types/backupstoragelocation.md#aws
|
||||
[14]: http://docs.aws.amazon.com/IAM/latest/UserGuide/introduction.html
|
||||
21 docs/image-tagging.md Normal file
@@ -0,0 +1,21 @@
|
||||
# Image tagging policy
|
||||
|
||||
This document describes Ark's image tagging policy.
|
||||
|
||||
## Released versions
|
||||
|
||||
`gcr.io/heptio-images/ark:<SemVer>`
|
||||
|
||||
Ark follows the [Semantic Versioning](http://semver.org/) standard for releases. Each tag in the `github.com/heptio/ark` repository has a matching image, e.g. `gcr.io/heptio-images/ark:v0.8.0`.
|
||||
|
||||
### Latest
|
||||
|
||||
`gcr.io/heptio-images/ark:latest`
|
||||
|
||||
The `latest` tag follows the most recently released version of Ark.
|
||||
|
||||
## Development
|
||||
|
||||
`gcr.io/heptio-images/ark:master`
|
||||
|
||||
The `master` tag follows the latest commit to land on the `master` branch.
|
||||
109 docs/install-overview.md Normal file
@@ -0,0 +1,109 @@
|
||||
# Set up Ark on your platform
|
||||
|
||||
You can run Ark with a cloud provider or on-premises. For detailed information about the platforms that Ark supports, see [Compatible Storage Providers][99].
|
||||
|
||||
In version 0.7.0 and later, you can run Ark in any namespace, which requires additional customization. See [Run in custom namespace][3].
|
||||
|
||||
In version 0.9.0 and later, you can use Ark's integration with restic, which requires additional setup. See [restic instructions][20].
|
||||
|
||||
## Customize configuration
|
||||
|
||||
Whether you run Ark on a cloud provider or on-premises, if you have more than one volume snapshot location for a given volume provider, you can specify which one to use by default by setting a server flag in your Ark deployment YAML.
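As a rough illustration, the flag is added to the server container's arguments in the deployment. The `provider:location` value format shown here is an assumption; check the locations documentation for the exact syntax your version supports.

```yaml
# Illustrative excerpt from the Ark server Deployment pod spec
containers:
- name: ark
  image: gcr.io/heptio-images/ark:latest
  command:
  - /ark
  args:
  - server
  - --default-volume-snapshot-locations=aws:ebs-us-east-1
```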
|
||||
|
||||
For details, see the documentation topics for individual cloud providers.
|
||||
|
||||
## Cloud provider
|
||||
|
||||
The Ark repository includes a set of example YAML files that specify the settings for each supported cloud provider. For provider-specific instructions, see:
|
||||
|
||||
* [Run Ark on AWS][0]
|
||||
* [Run Ark on GCP][1]
|
||||
* [Run Ark on Azure][2]
|
||||
* [Use IBM Cloud Object Store as Ark's storage destination][4]
|
||||
|
||||
## On-premises
|
||||
|
||||
You can run Ark in an on-premises cluster in different ways depending on your requirements.
|
||||
|
||||
First, you must select an object storage backend that Ark can use to store backup data. [Compatible Storage Providers][99] contains information on various
|
||||
options that are supported or have been reported to work by users. [Minio][101] is an option if you want to keep your backup data on-premises and you are
|
||||
not using another storage platform that offers an S3-compatible object storage API.
|
||||
|
||||
Second, if you need to back up persistent volume data, you must select a volume backup solution. [Volume Snapshot Providers][100] contains information on
|
||||
the supported options. For example, if you use [Portworx][102] for persistent storage, you can install their Ark plugin to get native Portworx snapshots as part
|
||||
of your Ark backups. If there is no native snapshot plugin available for your storage platform, you can use Ark's [restic integration][20], which provides a
|
||||
platform-agnostic backup solution for volume data.
|
||||
|
||||
## Examples
|
||||
|
||||
After you set up the Ark server, try these examples:
|
||||
|
||||
### Basic example (without PersistentVolumes)
|
||||
|
||||
1. Start the sample nginx app:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/nginx-app/base.yaml
|
||||
```
|
||||
|
||||
1. Create a backup:
|
||||
|
||||
```bash
|
||||
ark backup create nginx-backup --include-namespaces nginx-example
|
||||
```
|
||||
|
||||
1. Simulate a disaster:
|
||||
|
||||
```bash
|
||||
kubectl delete namespaces nginx-example
|
||||
```
|
||||
|
||||
Wait for the namespace to be deleted.
|
||||
|
||||
1. Restore your lost resources:
|
||||
|
||||
```bash
|
||||
ark restore create --from-backup nginx-backup
|
||||
```
|
||||
|
||||
### Snapshot example (with PersistentVolumes)
|
||||
|
||||
> NOTE: For Azure, you must run Kubernetes version 1.7.2 or later to support PV snapshotting of managed disks.
|
||||
|
||||
1. Start the sample nginx app:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/nginx-app/with-pv.yaml
|
||||
```
|
||||
|
||||
1. Create a backup with PV snapshotting:
|
||||
|
||||
```bash
|
||||
ark backup create nginx-backup --include-namespaces nginx-example
|
||||
```
|
||||
|
||||
1. Simulate a disaster:
|
||||
|
||||
```bash
|
||||
kubectl delete namespaces nginx-example
|
||||
```
|
||||
|
||||
Because the default [reclaim policy][19] for dynamically-provisioned PVs is "Delete", these commands should trigger your cloud provider to delete the disk that backs the PV. Deletion is asynchronous, so this may take some time. **Before continuing to the next step, check your cloud provider to confirm that the disk no longer exists.**
|
||||
|
||||
1. Restore your lost resources:
|
||||
|
||||
```bash
|
||||
ark restore create --from-backup nginx-backup
|
||||
```
|
||||
|
||||
[0]: aws-config.md
|
||||
[1]: gcp-config.md
|
||||
[2]: azure-config.md
|
||||
[3]: namespace.md
|
||||
[4]: ibm-config.md
|
||||
[19]: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming
|
||||
[20]: restic.md
|
||||
[99]: support-matrix.md
|
||||
[100]: support-matrix.md#volume-snapshot-providers
|
||||
[101]: https://www.minio.io
|
||||
[102]: https://portworx.com
|
||||
45 docs/issue-template-gen/main.go Normal file
@@ -0,0 +1,45 @@
|
||||
/*
|
||||
Copyright 2018 the Heptio Ark contributors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This code renders the IssueTemplate string in pkg/cmd/cli/bug/bug.go to
|
||||
// .github/ISSUE_TEMPLATE/bug_report.md via the hack/update-generated-issue-template.sh script.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"text/template"
|
||||
|
||||
"github.com/heptio/ark/pkg/cmd/cli/bug"
|
||||
)
|
||||
|
||||
func main() {
|
||||
outTemplateFilename := os.Args[1]
|
||||
outFile, err := os.OpenFile(outTemplateFilename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer outFile.Close()
|
||||
tmpl, err := template.New("ghissue").Parse(bug.IssueTemplate)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
err = tmpl.Execute(outFile, bug.ArkBugInfo{})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
168 docs/locations.md Normal file
@@ -0,0 +1,168 @@
|
||||
# Backup Storage Locations and Volume Snapshot Locations
|
||||
|
||||
Ark v0.10 introduces a new way of configuring where Ark backups and their associated persistent volume snapshots are stored.
|
||||
|
||||
## Motivations
|
||||
|
||||
In Ark versions prior to v0.10, the configuration for where to store backups & volume snapshots is specified in a `Config` custom resource. The `backupStorageProvider` section captures the place where all Ark backups should be stored. This is defined by a **provider** (e.g. `aws`, `azure`, `gcp`, `minio`, etc.), a **bucket**, and possibly some additional provider-specific settings (e.g. `region`). Similarly, the `persistentVolumeProvider` section captures the place where all persistent volume snapshots taken as part of Ark backups should be stored, and is defined by a **provider** and additional provider-specific settings (e.g. `region`).
|
||||
|
||||
There are a number of use cases that this basic design does not support, such as:
|
||||
|
||||
- Take snapshots of more than one kind of persistent volume in a single Ark backup (e.g. in a cluster with both EBS volumes and Portworx volumes)
|
||||
- Have some Ark backups go to a bucket in an eastern USA region, and others go to a bucket in a western USA region
|
||||
- For volume providers that support it (e.g. Portworx), have some snapshots be stored locally on the cluster and have others be stored in the cloud
|
||||
|
||||
Additionally, as we look ahead to backup replication, a major feature on our roadmap, we know that we'll need Ark to be able to support multiple possible storage locations.
|
||||
|
||||
## Overview
|
||||
|
||||
In Ark v0.10, we removed the `Config` custom resource and replaced it with two new custom resources, `BackupStorageLocation` and `VolumeSnapshotLocation`. These directly replace the legacy `backupStorageProvider` and `persistentVolumeProvider` sections of the `Config` resource, respectively.
|
||||
|
||||
Now, the user can pre-define more than one possible `BackupStorageLocation` and more than one `VolumeSnapshotLocation`, and can select *at backup creation time* the location in which the backup and associated snapshots should be stored.
|
||||
|
||||
A `BackupStorageLocation` is defined as a bucket, a prefix within that bucket under which all Ark data should be stored, and a set of additional provider-specific fields (e.g. AWS region, Azure storage account, etc.) The [API documentation][1] captures the configurable parameters for each in-tree provider.
|
||||
|
||||
A `VolumeSnapshotLocation` is defined entirely by provider-specific fields (e.g. AWS region, Azure resource group, Portworx snapshot type, etc.) The [API documentation][2] captures the configurable parameters for each in-tree provider.
|
||||
|
||||
Additionally, since multiple `VolumeSnapshotLocations` can be created, the user can now configure locations for more than one volume provider, and if the cluster has volumes from multiple providers (e.g. AWS EBS and Portworx), all of them can be snapshotted in a single Ark backup.
|
||||
|
||||
## Limitations / Caveats
|
||||
|
||||
- Volume snapshots are still limited by where your provider allows you to create snapshots. For example, AWS and Azure do not allow you to create a volume snapshot in a different region than where the volume is. If you try to take an Ark backup using a volume snapshot location with a different region than where your cluster's volumes are, the backup will fail.
|
||||
|
||||
- Each Ark backup has one `BackupStorageLocation`, and one `VolumeSnapshotLocation` per volume provider. It is not possible (yet) to send a single Ark backup to multiple backup storage locations simultaneously, or a single volume snapshot to multiple locations simultaneously. However, you can always set up multiple scheduled backups that differ only in the storage locations used if redundancy of backups across locations is important.
|
||||
|
||||
- Cross-provider snapshots are not supported. If you have a cluster with more than one type of volume (e.g. EBS and Portworx), but you only have a `VolumeSnapshotLocation` configured for EBS, then Ark will **only** snapshot the EBS volumes.
|
||||
|
||||
- Restic data is now stored under a prefix/subdirectory of the main Ark bucket, and will go into the bucket corresponding to the `BackupStorageLocation` selected by the user at backup creation time.
|
||||
|
||||
## Examples
|
||||
|
||||
Let's look at some examples of how we can use this new mechanism to address each of our previously unsupported use cases:
|
||||
|
||||
#### Take snapshots of more than one kind of persistent volume in a single Ark backup (e.g. in a cluster with both EBS volumes and Portworx volumes)
|
||||
|
||||
During server configuration:
|
||||
|
||||
```shell
|
||||
ark snapshot-location create ebs-us-east-1 \
|
||||
--provider aws \
|
||||
--config region=us-east-1
|
||||
|
||||
ark snapshot-location create portworx-cloud \
|
||||
--provider portworx \
|
||||
--config type=cloud
|
||||
```
|
||||
|
||||
During backup creation:
|
||||
|
||||
```shell
|
||||
ark backup create full-cluster-backup \
|
||||
--volume-snapshot-locations ebs-us-east-1,portworx-cloud
|
||||
```
|
||||
|
||||
Alternatively, since in this example there's only one possible volume snapshot location configured for each of our two providers (`ebs-us-east-1` for `aws`, and `portworx-cloud` for `portworx`), Ark doesn't require them to be explicitly specified when creating the backup:
|
||||
|
||||
```shell
|
||||
ark backup create full-cluster-backup
|
||||
```
|
||||
|
||||
#### Have some Ark backups go to a bucket in an eastern USA region, and others go to a bucket in a western USA region
|
||||
|
||||
During server configuration:
|
||||
|
||||
```shell
|
||||
ark backup-location create default \
|
||||
--provider aws \
|
||||
--bucket ark-backups \
|
||||
--config region=us-east-1
|
||||
|
||||
ark backup-location create s3-alt-region \
|
||||
--provider aws \
|
||||
--bucket ark-backups-alt \
|
||||
--config region=us-west-1
|
||||
```
|
||||
|
||||
During backup creation:
|
||||
```shell
|
||||
# The Ark server will automatically store backups in the backup storage location named "default" if
|
||||
# one is not specified when creating the backup. You can alter which backup storage location is used
|
||||
# by default by setting the --default-backup-storage-location flag on the `ark server` command (run
|
||||
# by the Ark deployment) to the name of a different backup storage location.
|
||||
ark backup create full-cluster-backup
|
||||
```
|
||||
Or:
|
||||
```shell
|
||||
ark backup create full-cluster-alternate-location-backup \
|
||||
--storage-location s3-alt-region
|
||||
```
|
||||
|
||||
#### For volume providers that support it (e.g. Portworx), have some snapshots be stored locally on the cluster and have others be stored in the cloud
|
||||
|
||||
During server configuration:
|
||||
|
||||
```shell
|
||||
ark snapshot-location create portworx-local \
|
||||
--provider portworx \
|
||||
--config type=local
|
||||
|
||||
ark snapshot-location create portworx-cloud \
|
||||
--provider portworx \
|
||||
--config type=cloud
|
||||
```
|
||||
|
||||
During backup creation:
|
||||
|
||||
```shell
|
||||
# Note that since in this example we have two possible volume snapshot locations for the Portworx
|
||||
# provider, we need to explicitly specify which one to use when creating a backup. Alternately,
|
||||
# you can set the --default-volume-snapshot-locations flag on the `ark server` command (run by
|
||||
# the Ark deployment) to specify which location should be used for each provider by default, in
|
||||
# which case you don't need to specify it when creating a backup.
|
||||
ark backup create local-snapshot-backup \
|
||||
--volume-snapshot-locations portworx-local
|
||||
```
|
||||
|
||||
Or:
|
||||
|
||||
```shell
|
||||
ark backup create cloud-snapshot-backup \
|
||||
--volume-snapshot-locations portworx-cloud
|
||||
```
|
||||
|
||||
#### One location is still easy
|
||||
|
||||
If you don't have a use case for more than one location, it's still just as easy to use Ark. Let's assume you're running on AWS, in the `us-west-1` region:
|
||||
|
||||
During server configuration:
|
||||
|
||||
```shell
|
||||
ark backup-location create default \
|
||||
--provider aws \
|
||||
--bucket ark-backups \
|
||||
--config region=us-west-1
|
||||
|
||||
ark snapshot-location create ebs-us-west-1 \
|
||||
--provider aws \
|
||||
--config region=us-west-1
|
||||
```
|
||||
|
||||
During backup creation:
|
||||
```shell
|
||||
# Ark will automatically use your configured backup storage location and volume snapshot location.
|
||||
# Nothing new needs to be specified when creating a backup.
|
||||
ark backup create full-cluster-backup
|
||||
```
|
||||
|
||||
## Additional Use Cases
|
||||
|
||||
1. If you're using Azure's AKS, you may want to store your volume snapshots outside of the "infrastructure" resource group that is automatically created when you create your AKS cluster. This is now possible using a `VolumeSnapshotLocation`, by specifying a `resourceGroup` under the `config` section of the snapshot location. See the [Azure volume snapshot location documentation][3] for details.
|
||||
|
||||
1. If you're using Azure, you may want to store your Ark backups across multiple storage accounts and/or resource groups. This is now possible using a `BackupStorageLocation`, by specifying a `storageAccount` and/or `resourceGroup`, respectively, under the `config` section of the backup location. See the [Azure backup storage location documentation][4] for details.
|
||||
|
||||
|
||||
|
||||
[1]: api-types/backupstoragelocation.md
|
||||
[2]: api-types/volumesnapshotlocation.md
|
||||
[3]: api-types/volumesnapshotlocation.md#azure
|
||||
[4]: api-types/backupstoragelocation.md#azure
|
||||
48 docs/migration-case.md Normal file
@@ -0,0 +1,48 @@
|
||||
# Cluster migration
|
||||
|
||||
*Using Backups and Restores*
|
||||
|
||||
Heptio Ark can help you port your resources from one cluster to another, as long as you point each Ark instance to the same cloud object storage location. In this scenario, we are also assuming that your clusters are hosted by the same cloud provider. **Note that Heptio Ark does not support the migration of persistent volumes across cloud providers.**
|
||||
|
||||
1. *(Cluster 1)* Assuming you haven't already been checkpointing your data with the Ark `schedule` operation, you need to first back up your entire cluster (replacing `<BACKUP-NAME>` as desired):
|
||||
|
||||
```
|
||||
ark backup create <BACKUP-NAME>
|
||||
```
|
||||
The default TTL is 30 days (720 hours); you can use the `--ttl` flag to change this as necessary.
|
||||
|
||||
1. *(Cluster 2)* Add the `--restore-only` flag to the server spec in the Ark deployment YAML (see the deployment excerpt after this list).
|
||||
|
||||
1. *(Cluster 2)* Make sure that the `BackupStorageLocation` and `VolumeSnapshotLocation` CRDs match the ones from *Cluster 1*, so that your new Ark server instance points to the same bucket.
|
||||
|
||||
1. *(Cluster 2)* Make sure that the Ark Backup object is created. Ark resources are synchronized with the backup files in cloud storage.
|
||||
|
||||
```
|
||||
ark backup describe <BACKUP-NAME>
|
||||
```
|
||||
|
||||
**Note:** As of version 0.10, the default sync interval is 1 minute, so make sure to wait before checking. You can configure this interval with the `--backup-sync-period` flag to the Ark server.
|
||||
|
||||
1. *(Cluster 2)* Once you have confirmed that the right Backup (`<BACKUP-NAME>`) is now present, you can restore everything with:
|
||||
|
||||
```
|
||||
ark restore create --from-backup <BACKUP-NAME>
|
||||
```
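For step 2 above, here is a minimal sketch of where `--restore-only` goes in the Ark server deployment; the container name and image shown are illustrative, and your deployment YAML may differ.

```yaml
# Excerpt from the Ark server Deployment on Cluster 2 (illustrative)
spec:
  template:
    spec:
      containers:
      - name: ark
        image: gcr.io/heptio-images/ark:latest
        command:
        - /ark
        args:
        - server
        - --restore-only
```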
|
||||
|
||||
## Verify both clusters
|
||||
|
||||
Check that the second cluster is behaving as expected:
|
||||
|
||||
1. *(Cluster 2)* Run:
|
||||
|
||||
```
|
||||
ark restore get
|
||||
```
|
||||
|
||||
1. Then run:
|
||||
|
||||
```
|
||||
ark restore describe <RESTORE-NAME-FROM-GET-COMMAND>
|
||||
```
|
||||
|
||||
If you encounter issues, make sure that Ark is running in the same namespace in both clusters.
|
||||
74 docs/namespace.md Normal file
@@ -0,0 +1,74 @@
|
||||
# Run in custom namespace
|
||||
|
||||
In Ark version 0.7.0 and later, you can run Ark in any namespace. To do so, you specify the
|
||||
namespace in the YAML files that configure the Ark server. You then also specify the namespace when
|
||||
you run Ark client commands.
|
||||
|
||||
## Edit the example files
|
||||
|
||||
The Ark release tarballs include a set of example configs that you can use to set up your Ark server. The
|
||||
examples place the server and backup/schedule/restore/etc. data in the `heptio-ark` namespace.
|
||||
|
||||
To run the server in another namespace, you edit the relevant files, changing `heptio-ark` to
|
||||
your desired namespace.
|
||||
|
||||
To store your backups, schedules, restores, and config in another namespace, you edit the relevant
|
||||
files, changing `heptio-ark` to your desired namespace. You also need to create the
|
||||
`cloud-credentials` secret in your desired namespace.
|
||||
|
||||
First, ensure you've [downloaded & extracted the latest release][0].
|
||||
|
||||
For all cloud providers, edit `config/common/00-prereqs.yaml`. This file defines:
|
||||
|
||||
* CustomResourceDefinitions for the Ark objects (backups, schedules, restores, downloadrequests, etc.)
|
||||
* The namespace where the Ark server runs
|
||||
* The namespace where backups, schedules, restores, etc. are stored
|
||||
* The Ark service account
|
||||
* The RBAC rules to grant permissions to the Ark service account
|
||||
|
||||
|
||||
### AWS
|
||||
|
||||
For AWS, edit:
|
||||
|
||||
* `config/aws/05-ark-backupstoragelocation.yaml`
|
||||
* `config/aws/06-ark-volumesnapshotlocation.yaml`
|
||||
* `config/aws/10-deployment.yaml`
|
||||
|
||||
|
||||
### Azure
|
||||
|
||||
For Azure, edit:
|
||||
|
||||
* `config/azure/00-ark-deployment.yaml`
|
||||
* `config/azure/05-ark-backupstoragelocation.yaml`
|
||||
* `config/azure/06-ark-volumesnapshotlocation.yaml`
|
||||
|
||||
### GCP
|
||||
|
||||
For GCP, edit:
|
||||
|
||||
* `config/gcp/05-ark-backupstoragelocation.yaml`
|
||||
* `config/gcp/06-ark-volumesnapshotlocation.yaml`
|
||||
* `config/gcp/10-deployment.yaml`
|
||||
|
||||
|
||||
### IBM
|
||||
|
||||
For IBM, edit:
|
||||
|
||||
* `config/ibm/05-ark-backupstoragelocation.yaml`
|
||||
* `config/ibm/10-deployment.yaml`
|
||||
|
||||
|
||||
## Specify the namespace in client commands
|
||||
|
||||
To specify the namespace for all Ark client commands, run:
|
||||
|
||||
```
|
||||
ark client config set namespace=<NAMESPACE_VALUE>
|
||||
```
|
||||
|
||||
|
||||
|
||||
[0]: get-started.md#download
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
A backup is a gzip-compressed tar file whose name matches the Backup API resource's `metadata.name` (what is specified during `ark backup create <NAME>`).
|
||||
|
||||
In cloud object storage, *each backup file is stored in its own subdirectory* beneath the bucket specified in the Ark server configuration. This subdirectory includes an additional file called `ark-backup.json`. The JSON file explicitly lists all info about your associated Backup resource--including any default values used--so that you have a complete historical record of its configuration. It also specifies `status.version`, which corresponds to the output file format.
|
||||
In cloud object storage, each backup file is stored in its own subdirectory in the bucket specified in the Ark server configuration. This subdirectory includes an additional file called `ark-backup.json`. The JSON file lists all information about your associated Backup resource, including any default values. This gives you a complete historical record of the backup configuration. The JSON file also specifies `status.version`, which corresponds to the output file format.
|
||||
|
||||
All together, the directory structure in your cloud storage may look like:
|
||||
The directory structure in your cloud storage looks something like:
|
||||
|
||||
```
|
||||
rootBucket/
|
||||
@@ -13,9 +13,9 @@ rootBucket/
|
||||
backup1234.tar.gz
|
||||
```
|
||||
|
||||
## `ark-backup.json`
|
||||
An example of this file looks like the following:
|
||||
```
|
||||
## Example backup JSON file
|
||||
|
||||
```json
|
||||
{
|
||||
"kind": "Backup",
|
||||
"apiVersion": "ark.heptio.com/v1",
|
||||
|
||||
31 docs/plugins.md Normal file
@@ -0,0 +1,31 @@
|
||||
# Plugins
|
||||
|
||||
Heptio Ark has a plugin architecture that allows users to add their own custom functionality to Ark backups & restores
|
||||
without having to modify/recompile the core Ark binary. To add custom functionality, users simply create their own binary
|
||||
containing implementations of Ark's plugin kinds (described below), plus a small amount of boilerplate code to
|
||||
expose the plugin implementations to Ark. This binary is added to a container image that serves as an init container for
|
||||
the Ark server pod and copies the binary into a shared emptyDir volume for the Ark server to access.
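A rough sketch of that wiring on the Ark server Deployment is shown below; the plugin image name, volume name, and mount paths are illustrative, not fixed values required by Ark.

```yaml
spec:
  template:
    spec:
      initContainers:
      - name: my-ark-plugin
        image: example.com/my-ark-plugin:latest   # illustrative plugin image
        volumeMounts:
        - name: plugins
          mountPath: /target                      # plugin binary is copied here
      containers:
      - name: ark
        image: gcr.io/heptio-images/ark:latest
        volumeMounts:
        - name: plugins
          mountPath: /plugins                     # server reads plugin binaries here
      volumes:
      - name: plugins
        emptyDir: {}
```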
|
||||
|
||||
Multiple plugins, of any type, can be implemented in this binary.
|
||||
|
||||
A fully-functional [sample plugin repository][1] is provided to serve as a convenient starting point for plugin authors.
|
||||
|
||||
## Plugin Kinds
|
||||
|
||||
Ark currently supports the following kinds of plugins:
|
||||
|
||||
- **Object Store** - persists and retrieves backups, backup logs and restore logs
|
||||
- **Block Store** - creates volume snapshots (during backup) and restores volumes from snapshots (during restore)
|
||||
- **Backup Item Action** - executes arbitrary logic for individual items prior to storing them in a backup file
|
||||
- **Restore Item Action** - executes arbitrary logic for individual items prior to restoring them into a cluster
|
||||
|
||||
## Plugin Logging
|
||||
|
||||
Ark provides a [logger][2] that can be used by plugins to log structured information to the main Ark server log or
|
||||
per-backup/restore logs. See the [sample repository][1] for an example of how to instantiate and use the logger
|
||||
within your plugin.
|
||||
|
||||
|
||||
|
||||
[1]: https://github.com/heptio/ark-plugin-example
|
||||
[2]: https://github.com/heptio/ark/blob/master/pkg/plugin/logger.go
|
||||
47 docs/rbac.md Normal file
@@ -0,0 +1,47 @@
|
||||
# Run Ark more securely with restrictive RBAC settings
|
||||
|
||||
By default, Ark runs with the ClusterRole `cluster-admin` so that it can back up or restore anything in your cluster. But `cluster-admin` access is wide open -- it gives Ark components access to everything in your cluster. Depending on your environment and your security needs, consider whether to configure additional RBAC policies with more restrictive access.
|
||||
|
||||
**Note:** Roles and RoleBindings are associated with a single namespace, not with an entire cluster. PersistentVolumes, by contrast, are cluster-scoped resources. This means that any backups or restores that use a restrictive Role and RoleBinding pair can manage only the resources that belong to that namespace. You do not need a wide-open RBAC policy to manage PersistentVolumes, however. You can configure a ClusterRole and ClusterRoleBinding that allow backups and restores only of PersistentVolumes, not of all objects in the cluster.
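For example, a ClusterRole and ClusterRoleBinding limited to PersistentVolumes might look like the following sketch; the names and the exact verb list are illustrative choices.

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: ark-persistentvolumes    # illustrative name
  labels:
    component: ark
rules:
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ark-persistentvolumes
subjects:
- kind: ServiceAccount
  name: YOUR_SERVICEACCOUNT_HERE
  namespace: YOUR_NAMESPACE_HERE
roleRef:
  kind: ClusterRole
  name: ark-persistentvolumes
  apiGroup: rbac.authorization.k8s.io
```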
|
||||
|
||||
For more information about RBAC and access control generally in Kubernetes, see the Kubernetes documentation about [access control][1], [managing service accounts][2], and [RBAC authorization][3].
|
||||
|
||||
## Set up Roles and RoleBindings
|
||||
|
||||
Here's a sample Role and RoleBinding pair.
|
||||
|
||||
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: YOUR_NAMESPACE_HERE
  name: ROLE_NAME_HERE
  labels:
    component: ark
rules:
- apiGroups:
  - ark.heptio.com
  verbs:
  - "*"
  resources:
  - "*"
```
|
||||
|
||||
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: ROLEBINDING_NAME_HERE
  namespace: YOUR_NAMESPACE_HERE
subjects:
- kind: ServiceAccount
  name: YOUR_SERVICEACCOUNT_HERE
  namespace: YOUR_NAMESPACE_HERE
roleRef:
  kind: Role
  name: ROLE_NAME_HERE
  apiGroup: rbac.authorization.k8s.io
```
|
||||
|
||||
[1]: https://kubernetes.io/docs/reference/access-authn-authz/controlling-access/
|
||||
[2]: https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/
|
||||
[3]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/
|
||||
[4]: namespace.md
|
||||
248 docs/restic.md Normal file
@@ -0,0 +1,248 @@
|
||||
# Restic Integration
|
||||
|
||||
As of version 0.9.0, Ark has support for backing up and restoring Kubernetes volumes using a free open-source backup tool called
|
||||
[restic][1].
|
||||
|
||||
Ark has always allowed you to take snapshots of persistent volumes as part of your backups if you’re using one of
|
||||
the supported cloud providers’ block storage offerings (Amazon EBS Volumes, Azure Managed Disks, Google Persistent Disks).
|
||||
Starting with version 0.6.0, we provide a plugin model that enables anyone to implement additional object and block storage
|
||||
backends, outside the main Ark repository.
|
||||
|
||||
We integrated restic with Ark so that users have an out-of-the-box solution for backing up and restoring almost any type of Kubernetes
|
||||
volume*. This is a new capability for Ark, not a replacement for existing functionality. If you're running on AWS, and
|
||||
taking EBS snapshots as part of your regular Ark backups, there's no need to switch to using restic. However, if you've
|
||||
been waiting for a snapshot plugin for your storage platform, or if you're using EFS, AzureFile, NFS, emptyDir,
|
||||
local, or any other volume type that doesn't have a native snapshot concept, restic might be for you.
|
||||
|
||||
Restic is not tied to a specific storage platform, which means that this integration also paves the way for future work to enable
|
||||
cross-volume-type data migrations. Stay tuned as this evolves!
|
||||
|
||||
\* hostPath volumes are not supported, but the [new local volume type][4] is supported.
|
||||
|
||||
## Setup
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- A working install of Ark version 0.10.0 or later. See [Set up Ark][2]
|
||||
- A local clone of [the latest release tag of the Ark repository][3]
|
||||
- Ark's restic integration requires the Kubernetes [MountPropagation feature][6], which is enabled by default in Kubernetes v1.10.0 and later.
|
||||
|
||||
|
||||
### Instructions
|
||||
|
||||
1. Ensure you've [downloaded & extracted the latest release][3].
|
||||
|
||||
1. In the Ark directory (i.e. where you extracted the release tarball), run the following to create new custom resource definitions:
|
||||
|
||||
```bash
|
||||
kubectl apply -f config/common/00-prereqs.yaml
|
||||
```
|
||||
|
||||
1. Run one of the following for your platform to create the daemonset:
|
||||
|
||||
- AWS: `kubectl apply -f config/aws/20-restic-daemonset.yaml`
|
||||
- Azure: `kubectl apply -f config/azure/20-restic-daemonset.yaml`
|
||||
- GCP: `kubectl apply -f config/gcp/20-restic-daemonset.yaml`
|
||||
- Minio: `kubectl apply -f config/minio/30-restic-daemonset.yaml`
|
||||
|
||||
You're now ready to use Ark with restic.
|
||||
|
||||
## Back up
|
||||
|
||||
1. Run the following for each pod that contains a volume to back up:
|
||||
|
||||
```bash
|
||||
kubectl -n YOUR_POD_NAMESPACE annotate pod/YOUR_POD_NAME backup.ark.heptio.com/backup-volumes=YOUR_VOLUME_NAME_1,YOUR_VOLUME_NAME_2,...
|
||||
```
|
||||
|
||||
where the volume names are the names of the volumes in the pod spec.
|
||||
|
||||
For example, for the following pod:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: sample
|
||||
namespace: foo
|
||||
spec:
|
||||
containers:
|
||||
- image: k8s.gcr.io/test-webserver
|
||||
name: test-webserver
|
||||
volumeMounts:
|
||||
- name: pvc-volume
|
||||
mountPath: /volume-1
|
||||
- name: emptydir-volume
|
||||
mountPath: /volume-2
|
||||
volumes:
|
||||
- name: pvc-volume
|
||||
persistentVolumeClaim:
|
||||
claimName: test-volume-claim
|
||||
- name: emptydir-volume
|
||||
emptyDir: {}
|
||||
```
|
||||
|
||||
You'd run:
|
||||
```bash
|
||||
kubectl -n foo annotate pod/sample backup.ark.heptio.com/backup-volumes=pvc-volume,emptydir-volume
|
||||
```
|
||||
|
||||
This annotation can also be provided in a pod template spec if you use a controller to manage your pods (see the Deployment sketch after this list).
|
||||
|
||||
1. Take an Ark backup:
|
||||
|
||||
```bash
|
||||
ark backup create NAME OPTIONS...
|
||||
```
|
||||
|
||||
1. When the backup completes, view information about the backups:
|
||||
|
||||
```bash
|
||||
ark backup describe YOUR_BACKUP_NAME
|
||||
|
||||
kubectl -n heptio-ark get podvolumebackups -l ark.heptio.com/backup-name=YOUR_BACKUP_NAME -o yaml
|
||||
```
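For example, here is a sketch of the same annotation set on a Deployment's pod template; the Deployment itself is hypothetical, and only the `annotations` entry is the part Ark cares about.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: sample
  namespace: foo
spec:
  selector:
    matchLabels:
      app: sample
  template:
    metadata:
      labels:
        app: sample
      annotations:
        backup.ark.heptio.com/backup-volumes: pvc-volume,emptydir-volume
    spec:
      containers:
      - name: test-webserver
        image: k8s.gcr.io/test-webserver
        volumeMounts:
        - name: pvc-volume
          mountPath: /volume-1
        - name: emptydir-volume
          mountPath: /volume-2
      volumes:
      - name: pvc-volume
        persistentVolumeClaim:
          claimName: test-volume-claim
      - name: emptydir-volume
        emptyDir: {}
```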
|
||||
|
||||
## Restore
|
||||
|
||||
1. Restore from your Ark backup:
|
||||
|
||||
```bash
|
||||
ark restore create --from-backup BACKUP_NAME OPTIONS...
|
||||
```
|
||||
|
||||
1. When the restore completes, view information about your pod volume restores:
|
||||
|
||||
```bash
|
||||
ark restore describe YOUR_RESTORE_NAME
|
||||
|
||||
kubectl -n heptio-ark get podvolumerestores -l ark.heptio.com/restore-name=YOUR_RESTORE_NAME -o yaml
|
||||
```
|
||||
|
||||
## Limitations
|
||||
|
||||
- `hostPath` volumes are not supported. [Local persistent volumes][4] are supported.
|
||||
- Those of you familiar with [restic][1] may know that it encrypts all of its data. We've decided to use a static,
|
||||
common encryption key for all restic repositories created by Ark. **This means that anyone who has access to your
|
||||
bucket can decrypt your restic backup data**. Make sure that you limit access to the restic bucket
|
||||
appropriately. We plan to implement full Ark backup encryption, including securing the restic encryption keys, in
|
||||
a future release.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
Run the following checks:
|
||||
|
||||
Are your Ark server and daemonset pods running?
|
||||
|
||||
```bash
|
||||
kubectl get pods -n heptio-ark
|
||||
```
|
||||
|
||||
Does your restic repository exist, and is it ready?
|
||||
|
||||
```bash
|
||||
ark restic repo get
|
||||
|
||||
ark restic repo get REPO_NAME -o yaml
|
||||
```
|
||||
|
||||
Are there any errors in your Ark backup/restore?
|
||||
|
||||
```bash
|
||||
ark backup describe BACKUP_NAME
|
||||
ark backup logs BACKUP_NAME
|
||||
|
||||
ark restore describe RESTORE_NAME
|
||||
ark restore logs RESTORE_NAME
|
||||
```
|
||||
|
||||
What is the status of your pod volume backups/restores?
|
||||
|
||||
```bash
|
||||
kubectl -n heptio-ark get podvolumebackups -l ark.heptio.com/backup-name=BACKUP_NAME -o yaml
|
||||
|
||||
kubectl -n heptio-ark get podvolumerestores -l ark.heptio.com/restore-name=RESTORE_NAME -o yaml
|
||||
```
|
||||
|
||||
Is there any useful information in the Ark server or daemon pod logs?
|
||||
|
||||
```bash
|
||||
kubectl -n heptio-ark logs deploy/ark
|
||||
kubectl -n heptio-ark logs DAEMON_POD_NAME
|
||||
```
|
||||
|
||||
**NOTE**: You can increase the verbosity of the pod logs by adding `--log-level=debug` as an argument
|
||||
to the container command in the deployment/daemonset pod template spec.
|
||||
|
||||
## How backup and restore work with restic
|
||||
|
||||
We introduced three custom resource definitions and associated controllers:
|
||||
|
||||
- `ResticRepository` - represents/manages the lifecycle of Ark's [restic repositories][5]. Ark creates
|
||||
a restic repository per namespace when the first restic backup for a namespace is requested. The controller
|
||||
for this custom resource executes restic repository lifecycle commands -- `restic init`, `restic check`,
|
||||
and `restic prune`.
|
||||
|
||||
You can see information about your Ark restic repositories by running `ark restic repo get`.
|
||||
|
||||
- `PodVolumeBackup` - represents a restic backup of a volume in a pod. The main Ark backup process creates
|
||||
one or more of these when it finds an annotated pod. Each node in the cluster runs a controller for this
|
||||
resource (in a daemonset) that handles the `PodVolumeBackups` for pods on that node. The controller executes
|
||||
`restic backup` commands to back up pod volume data.
|
||||
|
||||
- `PodVolumeRestore` - represents a restic restore of a pod volume. The main Ark restore process creates one
|
||||
or more of these when it encounters a pod that has associated restic backups. Each node in the cluster runs a
|
||||
controller for this resource (in the same daemonset as above) that handles the `PodVolumeRestores` for pods
|
||||
on that node. The controller executes `restic restore` commands to restore pod volume data.
|
||||
|
||||
### Backup
|
||||
|
||||
1. The main Ark backup process checks each pod that it's backing up for the annotation specifying a restic backup
|
||||
should be taken (`backup.ark.heptio.com/backup-volumes`)
|
||||
1. When found, Ark first ensures a restic repository exists for the pod's namespace, by:
|
||||
- checking if a `ResticRepository` custom resource already exists
|
||||
- if not, creating a new one, and waiting for the `ResticRepository` controller to init/check it
|
||||
1. Ark then creates a `PodVolumeBackup` custom resource per volume listed in the pod annotation
|
||||
1. The main Ark process now waits for the `PodVolumeBackup` resources to complete or fail
|
||||
1. Meanwhile, each `PodVolumeBackup` is handled by the controller on the appropriate node, which:
|
||||
- has a hostPath volume mount of `/var/lib/kubelet/pods` to access the pod volume data
|
||||
- finds the pod volume's subdirectory within the above volume
|
||||
- runs `restic backup`
|
||||
- updates the status of the custom resource to `Completed` or `Failed`
|
||||
1. As each `PodVolumeBackup` finishes, the main Ark process captures its restic snapshot ID and adds it as an annotation
|
||||
to the copy of the pod JSON that's stored in the Ark backup. This will be used for restores, as seen in the next section.
|
||||
|
||||
### Restore
|
||||
|
||||
1. The main Ark restore process checks each pod that it's restoring for annotations specifying a restic backup
|
||||
exists for a volume in the pod (`snapshot.ark.heptio.com/<volume-name>`)
|
||||
1. When found, Ark first ensures a restic repository exists for the pod's namespace, by:
|
||||
- checking if a `ResticRepository` custom resource already exists
|
||||
- if not, creating a new one, and waiting for the `ResticRepository` controller to init/check it (note that
|
||||
in this case, the actual repository should already exist in object storage, so the Ark controller will simply
|
||||
check it for integrity)
|
||||
1. Ark adds an init container to the pod, whose job is to wait for all restic restores for the pod to complete (more
|
||||
on this shortly)
|
||||
1. Ark creates the pod, with the added init container, by submitting it to the Kubernetes API
|
||||
1. Ark creates a `PodVolumeRestore` custom resource for each volume to be restored in the pod
|
||||
1. The main Ark process now waits for each `PodVolumeRestore` resource to complete or fail
|
||||
1. Meanwhile, each `PodVolumeRestore` is handled by the controller on the appropriate node, which:
|
||||
- has a hostPath volume mount of `/var/lib/kubelet/pods` to access the pod volume data
|
||||
- waits for the pod to be running the init container
|
||||
- finds the pod volume's subdirectory within the above volume
|
||||
- runs `restic restore`
|
||||
- on success, writes a file into the pod volume, in an `.ark` subdirectory, whose name is the UID of the Ark restore
|
||||
that this pod volume restore is for
|
||||
- updates the status of the custom resource to `Completed` or `Failed`
|
||||
1. The init container that was added to the pod is running a process that waits until it finds a file
|
||||
within each restored volume, under `.ark`, whose name is the UID of the Ark restore being run
|
||||
1. Once all such files are found, the init container's process terminates successfully and the pod moves
|
||||
on to running other init containers/the main containers.
|
||||
|
||||
|
||||
[1]: https://github.com/restic/restic
|
||||
[2]: install-overview.md
|
||||
[3]: https://github.com/heptio/ark/releases/
|
||||
[4]: https://kubernetes.io/docs/concepts/storage/volumes/#local
|
||||
[5]: http://restic.readthedocs.io/en/latest/100_references.html#terminology
|
||||
[6]: https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation
|
||||
160 docs/storage-layout-reorg-v0.10.md Normal file
@@ -0,0 +1,160 @@
|
||||
# Object Storage Layout Changes in v0.10
|
||||
|
||||
## Overview
|
||||
|
||||
Ark v0.10 includes breaking changes to where data is stored in your object storage bucket. You'll need to run a [one-time migration procedure](#upgrading-to-v010)
|
||||
if you're upgrading from prior versions of Ark.
|
||||
|
||||
## Details
|
||||
|
||||
Prior to v0.10, Ark stored data in an object storage bucket using the following structure:
|
||||
|
||||
```
|
||||
<your-bucket>/
|
||||
backup-1/
|
||||
ark-backup.json
|
||||
backup-1.tar.gz
|
||||
backup-1-logs.gz
|
||||
restore-of-backup-1-logs.gz
|
||||
restore-of-backup-1-results.gz
|
||||
backup-2/
|
||||
ark-backup.json
|
||||
backup-2.tar.gz
|
||||
backup-2-logs.gz
|
||||
restore-of-backup-2-logs.gz
|
||||
restore-of-backup-2-results.gz
|
||||
...
|
||||
```
|
||||
|
||||
Ark also stored restic data, if applicable, in a separate object storage bucket, structured as:
|
||||
|
||||
```
|
||||
<your-ark-restic-bucket>/[<your-optional-prefix>/]
|
||||
namespace-1/
|
||||
data/
|
||||
index/
|
||||
keys/
|
||||
snapshots/
|
||||
config
|
||||
namespace-2/
|
||||
data/
|
||||
index/
|
||||
keys/
|
||||
snapshots/
|
||||
config
|
||||
...
|
||||
```
|
||||
|
||||
As of v0.10, we've reorganized this layout to provide a cleaner and more extensible directory structure. The new layout looks like:
|
||||
|
||||
```
|
||||
<your-bucket>[/<your-prefix>]/
|
||||
backups/
|
||||
backup-1/
|
||||
ark-backup.json
|
||||
backup-1.tar.gz
|
||||
backup-1-logs.gz
|
||||
backup-2/
|
||||
ark-backup.json
|
||||
backup-2.tar.gz
|
||||
backup-2-logs.gz
|
||||
...
|
||||
restores/
|
||||
restore-of-backup-1/
|
||||
restore-of-backup-1-logs.gz
|
||||
restore-of-backup-1-results.gz
|
||||
restore-of-backup-2/
|
||||
restore-of-backup-2-logs.gz
|
||||
restore-of-backup-2-results.gz
|
||||
...
|
||||
restic/
|
||||
namespace-1/
|
||||
data/
|
||||
index/
|
||||
keys/
|
||||
snapshots/
|
||||
config
|
||||
namespace-2/
|
||||
data/
|
||||
index/
|
||||
keys/
|
||||
snapshots/
|
||||
config
|
||||
...
|
||||
...
|
||||
```
|
||||
|
||||
## Upgrading to v0.10
|
||||
|
||||
Before upgrading to v0.10, you'll need to run a one-time upgrade script to rearrange the contents of your existing Ark bucket(s) to be compatible with
|
||||
the new layout.
|
||||
|
||||
Please note that the following scripts **will not** migrate existing restore logs/results into the new `restores/` subdirectory. This means that they
|
||||
will not be accessible using `ark restore describe` or `ark restore logs`. They *will* remain in the relevant backup's subdirectory so they are manually
|
||||
accessible, and will eventually be garbage-collected along with the backup. We've taken this approach in order to keep the migration scripts simple
|
||||
and less error-prone.
|
||||
|
||||
### rclone-Based Script
|
||||
|
||||
This script uses [rclone][1], which you can download and install following the instructions [here][2].
|
||||
Please read through the script carefully before starting and execute it step-by-step.
|
||||
|
||||
```bash
ARK_BUCKET=<your-ark-bucket>
ARK_TEMP_MIGRATION_BUCKET=<a-temp-bucket-for-migration>

# 1. This is an interactive step that configures rclone to be
#    able to access your storage provider. Follow the instructions,
#    and keep track of the "remote name" for the next step:
rclone config

# 2. Store the name of the rclone remote that you just set up
#    in Step #1:
RCLONE_REMOTE_NAME=<your-remote-name>

# 3. Create a temporary bucket to be used as a backup of your
#    current Ark bucket's contents:
rclone mkdir ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET}

# 4. Do a full copy of the contents of your Ark bucket into the
#    temporary bucket:
rclone copy ${RCLONE_REMOTE_NAME}:${ARK_BUCKET} ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET}

# 5. Verify that the temporary bucket contains an exact copy of
#    your Ark bucket's contents. You should see a short block
#    of output stating "0 differences found":
rclone check ${RCLONE_REMOTE_NAME}:${ARK_BUCKET} ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET}

# 6. Delete your Ark bucket's contents (this command does not
#    delete the bucket itself, only the contents):
rclone delete ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}

# 7. Copy the contents of the temporary bucket into your Ark bucket,
#    under the 'backups/' directory/prefix:
rclone copy ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET} ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}/backups

# 8. Verify that the 'backups/' directory in your Ark bucket now
#    contains an exact copy of the temporary bucket's contents:
rclone check ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}/backups ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET}

# 9. OPTIONAL: If you have restic data to migrate:

#    a. Copy the contents of your Ark restic location into your
#       Ark bucket, under the 'restic/' directory/prefix:
ARK_RESTIC_LOCATION=<your-ark-restic-bucket[/optional-prefix]>
rclone copy ${RCLONE_REMOTE_NAME}:${ARK_RESTIC_LOCATION} ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}/restic

#    b. Check that the 'restic/' directory in your Ark bucket now
#       contains an exact copy of your restic location:
rclone check ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}/restic ${RCLONE_REMOTE_NAME}:${ARK_RESTIC_LOCATION}

#    c. Delete your ResticRepository custom resources to allow Ark
#       to find them in the new location:
kubectl -n heptio-ark delete resticrepositories --all

# 10. Once you've confirmed that Ark v0.10 works with your revised Ark
#     bucket, you can delete the temporary migration bucket.
```
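
Before you delete the temporary migration bucket in step 10, you may want to spot-check the new layout. A minimal sketch, reusing the variables from the script above:

```bash
# List the top-level prefixes in the migrated bucket; you should see backups/
# and, if you migrated restic data, restic/.
rclone lsd ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}
```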

[1]: https://rclone.org/
[2]: https://rclone.org/downloads/

58 docs/support-matrix.md Normal file
@@ -0,0 +1,58 @@

# Compatible Storage Providers

Ark supports a variety of storage providers for different backup and snapshot operations. As of version 0.6.0, a plugin system allows anyone to add compatibility for additional backup and volume storage platforms without modifying the Ark codebase.

## Backup Storage Providers

| Provider                  | Owner    | Contact                         |
|---------------------------|----------|---------------------------------|
| [AWS S3][2]               | Ark Team | [Slack][10], [GitHub Issue][11] |
| [Azure Blob Storage][3]   | Ark Team | [Slack][10], [GitHub Issue][11] |
| [Google Cloud Storage][4] | Ark Team | [Slack][10], [GitHub Issue][11] |

## S3-Compatible Backup Storage Providers

Ark uses [Amazon's Go SDK][12] to connect to the S3 API. Some third-party storage providers also support the S3 API, and users have reported the following providers work with Ark:

_Note that these providers are not regularly tested by the Ark team._

* [IBM Cloud][5]
* [Minio][9]
* Ceph RADOS v12.2.7
* [DigitalOcean][7]
* Quobyte

_Some storage providers, like Quobyte, may need a different [signature algorithm version][15]._

## Volume Snapshot Providers

| Provider                         | Owner           | Contact                         |
|----------------------------------|-----------------|---------------------------------|
| [AWS EBS][2]                     | Ark Team        | [Slack][10], [GitHub Issue][11] |
| [Azure Managed Disks][3]         | Ark Team        | [Slack][10], [GitHub Issue][11] |
| [Google Compute Engine Disks][4] | Ark Team        | [Slack][10], [GitHub Issue][11] |
| [Restic][1]                      | Ark Team        | [Slack][10], [GitHub Issue][11] |
| [Portworx][6]                    | Portworx        | [Slack][13], [GitHub Issue][14] |
| [DigitalOcean][7]                | StackPointCloud |                                 |

### Adding a new plugin

To write a plugin for a new backup or volume storage system, take a look at the [example repo][8].

After you publish your plugin, open a PR that adds your plugin to the appropriate list.

[1]: restic.md
[2]: aws-config.md
[3]: azure-config.md
[4]: gcp-config.md
[5]: ibm-config.md
[6]: https://docs.portworx.com/scheduler/kubernetes/ark.html
[7]: https://github.com/StackPointCloud/ark-plugin-digitalocean
[8]: https://github.com/heptio/ark-plugin-example/
[9]: get-started.md
[10]: https://kubernetes.slack.com/messages/ark-dr
[11]: https://github.com/heptio/ark/issues
[12]: https://github.com/aws/aws-sdk-go/aws
[13]: https://portworx.slack.com/messages/px-k8s
[14]: https://github.com/portworx/ark-plugin/issues
[15]: api-types/backupstoragelocation.md#aws

71 docs/troubleshooting.md Normal file
@@ -0,0 +1,71 @@

# Troubleshooting

These tips can help you troubleshoot known issues. If they don't help, you can [file an issue][4], or talk to us on the [#ark-dr channel][25] on the Kubernetes Slack server.

See also:

- [Debug installation/setup issues][2]
- [Debug restores][1]

## General troubleshooting information

In `ark` version >= `0.10.0`, you can use the `ark bug` command to open a [GitHub issue][4] by launching a browser window with some prepopulated values. Values included are OS, CPU architecture, `kubectl` client and server versions (if available), and the `ark` client version. This information isn't submitted to GitHub until you click the `Submit new issue` button in the GitHub UI, so feel free to add, remove, or update whatever information you like.

Some general commands for troubleshooting that may be helpful (see the example session after this list):

* `ark backup describe <backupName>` - describe the details of a backup
* `ark backup logs <backupName>` - fetch the logs for this specific backup. Useful for viewing failures and warnings, including resources that could not be backed up.
* `ark restore describe <restoreName>` - describe the details of a restore
* `ark restore logs <restoreName>` - fetch the logs for this specific restore. Useful for viewing failures and warnings, including resources that could not be restored.
* `kubectl logs deployment/ark -n heptio-ark` - fetch the logs of the Ark server pod. This provides the output of the Ark server processes.
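
For example, a typical debugging session for a failed backup might look like the following (the backup name is a placeholder):

```bash
# Inspect the backup's status, then pull its logs and the server logs.
ark backup describe my-backup
ark backup logs my-backup
kubectl logs deployment/ark -n heptio-ark | grep -i error
```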

### Getting ark debug logs

You can increase the verbosity of the Ark server by editing your Ark deployment to look like this:

```
kubectl edit deployment/ark -n heptio-ark
...
containers:
- name: ark
  image: gcr.io/heptio-images/ark:latest
  command:
    - /ark
  args:
    - server
    - --log-level # Add this line
    - debug       # Add this line
...
```

## Known issue with restoring LoadBalancer Service

Because of how Kubernetes handles Service objects of `type=LoadBalancer`, when you restore these objects you might encounter an issue with changed values for Service UIDs. Kubernetes automatically generates the name of the cloud resource based on the Service UID, which is different when restored, resulting in a different name for the cloud load balancer. If the DNS CNAME for your application points to the DNS name of your cloud load balancer, you'll need to update the CNAME pointer when you perform an Ark restore.

Alternatively, you might be able to use the Service's `spec.loadBalancerIP` field to keep connections valid, if your cloud provider supports this value. See [the Kubernetes documentation about Services of Type LoadBalancer](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer).
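
If your provider supports it, one way to use that field is to set it on the Service before it is backed up, so the restored Service requests the same address. A sketch (the Service name, namespace, and IP are placeholders):

```bash
# Ask the cloud provider for a specific load balancer IP on this Service.
kubectl patch service my-app -n my-namespace --type merge \
  -p '{"spec":{"loadBalancerIP":"203.0.113.10"}}'
```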

## Miscellaneous issues

### Ark reports `custom resource not found` errors when starting up

Ark's server will not start if the required Custom Resource Definitions are not found in Kubernetes. Apply the `config/common/00-prereqs.yaml` file to create these definitions, then restart Ark.
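
A minimal sketch of that recovery, assuming the default `heptio-ark` namespace and the `component: ark` label used by the example deployments in this repository:

```bash
kubectl apply -f config/common/00-prereqs.yaml
# Restart the server by deleting its pod; the Deployment recreates it.
kubectl delete pod -n heptio-ark -l component=ark
```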

### `ark backup logs` returns a `SignatureDoesNotMatch` error

Downloading artifacts from object storage utilizes temporary, signed URLs. In the case of S3-compatible providers, such as Ceph, there may be differences between their implementation and the official S3 API that cause errors.

Here are some things to verify if you receive `SignatureDoesNotMatch` errors:

* Make sure your S3-compatible layer is using [signature version 4][5] (such as Ceph RADOS v12.2.7)
* For Ceph, try using a native Ceph account for credentials instead of external providers such as OpenStack Keystone

[1]: debugging-restores.md
[2]: debugging-install.md
[4]: https://github.com/heptio/ark/issues
[5]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
[25]: https://kubernetes.slack.com/messages/ark-dr

89 docs/upgrading-to-v0.10.md Normal file
@@ -0,0 +1,89 @@

# Upgrading to Ark v0.10

## Overview

Ark v0.10 includes a number of breaking changes. Below, we outline what those changes are, and what steps you should take to ensure a successful upgrade from prior versions of Ark.

## Breaking Changes

### Switch from Config to BackupStorageLocation and VolumeSnapshotLocation CRDs, and new server flags

Prior to v0.10, Ark used a `Config` CRD to capture information about your backup storage and persistent volume providers, as well as some miscellaneous Ark settings. In v0.10, we've eliminated this CRD and replaced it with:

- A [BackupStorageLocation][1] CRD to capture information about where to store your backups
- A [VolumeSnapshotLocation][2] CRD to capture information about where to store your persistent volume snapshots
- Command-line flags for the `ark server` command (run by your Ark deployment) to capture miscellaneous Ark settings

When upgrading to v0.10, you'll need to transfer the configuration information that you currently have in the `Config` CRD into the above. We'll cover exactly how to do this below.

For a general overview of this change, see the [Locations documentation][4].

### Reorganization of data in object storage

We've made [changes to the layout of data stored in object storage][3] for simplicity and extensibility. You'll need to rearrange any pre-v0.10 data as part of the upgrade. We've provided a script to help with this.

## Step-by-Step Upgrade Instructions

1. Ensure you've [downloaded & extracted the latest release][5].

1. Scale down your existing Ark deployment:
    ```bash
    kubectl scale -n heptio-ark deploy/ark --replicas 0
    ```

1. In the Ark directory (i.e. where you extracted the release tarball), re-apply the `00-prereqs.yaml` file to create new CRDs:
    ```bash
    kubectl apply -f config/common/00-prereqs.yaml
    ```

1. Create one or more [BackupStorageLocation][1] resources based on the examples provided in the `config/` directory for your platform, using information from the existing `Config` resource as necessary.

1. If you're using Ark to take PV snapshots, create one or more [VolumeSnapshotLocation][2] resources based on the examples provided in the `config/` directory for your platform, using information from the existing `Config` resource as necessary.

1. Perform the one-time object storage migration detailed [here][3].

1. In your Ark deployment YAML (see the `config/` directory for samples), specify flags to the `ark server` command under the container's `args` (see the sketch following these instructions for an example of the resulting invocation):

    a. The names of the `BackupStorageLocation` and `VolumeSnapshotLocation(s)` that should be used by default for backups. If defaults are set here, users won't need to explicitly specify location names when creating backups (though they still can, if they want to store backups/snapshots in alternate locations). If no value is specified for `--default-backup-storage-location`, the Ark server looks for a `BackupStorageLocation` named `default` to use.

    Flag | Default Value | Description | Example
    ---- | ------------- | ----------- | -------
    `--default-backup-storage-location` | "default" | name of the backup storage location that should be used by default for backups | aws-us-east-1-bucket
    `--default-volume-snapshot-locations` | [none] | name of the volume snapshot location(s) that should be used by default for PV snapshots, for each PV provider | aws:us-east-1,portworx:local

    **NOTE:** the values of these flags should correspond to the names of `BackupStorageLocation` and `VolumeSnapshotLocation` custom resources in the cluster.

    b. Any non-default Ark server settings:

    Flag | Default Value | Description
    ---- | ------------- | -----------
    `--backup-sync-period` | 1m | how often to ensure all Ark backups in object storage exist as Backup API objects in the cluster
    `--restic-timeout` | 1h | how long backups/restores of pod volumes should be allowed to run before timing out (previously `podVolumeOperationTimeout` in the `Config` resource in pre-v0.10 versions)
    `--restore-only` | false | run in a mode where only restores are allowed; backups, schedules, and garbage-collection are all disabled

1. If you are using any plugins, update the Ark deployment YAML to reference the latest image tag for your plugins. This can be found under the `initContainers` section of your deployment YAML.

1. Apply your updated Ark deployment YAML to your cluster and ensure the pod(s) start up successfully.

1. If you're using Ark's restic integration, ensure the daemon set pods have been re-created with the latest Ark image (if your daemon set YAML is using the `:latest` tag, you can delete the pods so they're recreated with an updated image).

1. Once you've confirmed all of your settings have been migrated over correctly, delete the Config CRD:
    ```bash
    kubectl delete -n heptio-ark config --all
    kubectl delete crd configs.ark.heptio.com
    ```
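
As a rough illustration of step 7, after these changes the container's `args` amount to an `ark server` invocation along these lines (the location names are the placeholder examples from the tables above, not values shipped with Ark):

```bash
# A minimal sketch of the resulting server invocation; substitute the names of
# your own BackupStorageLocation and VolumeSnapshotLocation resources.
/ark server \
  --default-backup-storage-location=aws-us-east-1-bucket \
  --default-volume-snapshot-locations=aws:us-east-1,portworx:local \
  --backup-sync-period=1m \
  --restic-timeout=1h
```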

[1]: api-types/backupstoragelocation.md
[2]: api-types/volumesnapshotlocation.md
[3]: storage-layout-reorg-v0.10.md
[4]: locations.md
[5]: get-started.md#download

@@ -1,54 +0,0 @@

# Use Cases

This doc provides sample Ark commands for the following common scenarios:

* [Disaster recovery][0]
* [Cluster migration][1]

## Disaster recovery

*Using Schedules and Restore-Only Mode*

If you periodically back up your cluster's resources, you are able to return to a previous state in case of some unexpected mishap, such as a service outage. Doing so with Heptio Ark looks like the following:

1. After you first run the Ark server on your cluster, set up a daily backup (replacing `<SCHEDULE NAME>` in the command as desired):

    ```
    ark schedule create <SCHEDULE NAME> --schedule "0 7 * * *"
    ```

    This creates a Backup object with the name `<SCHEDULE NAME>-<TIMESTAMP>`.

2. A disaster happens and you need to recreate your resources.

3. Update the [Ark server Config][3], setting `restoreOnlyMode` to `true`. This prevents Backup objects from being created or deleted during your Restore process.

4. Create a restore with your most recent Ark Backup:

    ```
    ark restore create <SCHEDULE NAME>-<TIMESTAMP>
    ```

## Cluster migration

*Using Backups and Restores*

Heptio Ark can help you port your resources from one cluster to another, as long as you point each Ark Config to the same cloud object storage. In this scenario, we are also assuming that your clusters are hosted by the same cloud provider. **Note that Heptio Ark does not support the migration of persistent volumes across cloud providers.**

1. *(Cluster 1)* Assuming you haven't already been checkpointing your data with the Ark `schedule` operation, you need to first back up your entire cluster (replacing `<BACKUP-NAME>` as desired):

    ```
    ark backup create <BACKUP-NAME>
    ```

    The default TTL is 24 hours; you can use the `--ttl` flag to change this as necessary.

2. *(Cluster 2)* Make sure that the `persistentVolumeProvider` and `backupStorageProvider` fields in the Ark Config match the ones from *Cluster 1*, so that your new Ark server instance is pointing to the same bucket.

3. *(Cluster 2)* Make sure that the Ark Backup object has been created. Ark resources are [synced][2] with the backup files available in cloud storage.

4. *(Cluster 2)* Once you have confirmed that the right Backup (`<BACKUP-NAME>`) is now present, you can restore everything with:

    ```
    ark restore create <BACKUP-NAME>
    ```

[0]: #disaster-recovery
[1]: #cluster-migration
[2]: concepts.md#cloud-storage-sync
[3]: config-definition.md#main-config-parameters

@@ -2,22 +2,17 @@

## Overview

We are using [dep][0] to manage dependencies. You can install it by running

```
go get -u github.com/golang/dep/cmd/dep
```

Dep currently pulls in a bit more than we'd like, so we have created a script to remove these extra files: `hack/dep-save.sh`.
We are using [dep][0] to manage dependencies. You can install it by following [these instructions][1].

## Adding a new dependency

Run `hack/dep-save.sh`. If you want to see verbose output, you can append `-v` as in `hack/dep-save.sh -v`.
Run `dep ensure`. If you want to see verbose output, you can append `-v` as in `dep ensure -v`.

## Updating an existing dependency

Run `hack/dep-save.sh -update <pkg> [<pkg> ...]` to update one or more dependencies.
Run `dep ensure -update <pkg> [<pkg> ...]` to update one or more dependencies.

[0]: https://github.com/golang/dep
[1]: https://golang.github.io/dep/docs/installation.html

26 docs/versions.md Normal file
@@ -0,0 +1,26 @@

# Upgrading Ark versions

Ark supports multiple concurrent versions. Whether you're setting up Ark for the first time or upgrading to a new version, you need to pay careful attention to versioning. This doc page is new as of version 0.10.0, and will be updated with information about subsequent releases.

## Minor versions, patch versions

The documentation site provides docs for minor versions only, not for patch releases. Patch releases are guaranteed not to be breaking, but you should carefully read the [release notes][1] to make sure that you understand any relevant changes.

If you're upgrading from one patch version to another, you only need to update the image tags in your configurations. No other steps are needed.
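
For example, a patch upgrade can be as simple as bumping the server image tag. A sketch, assuming the deployment and namespace names from the example manifests in this repo (the tag below is a placeholder):

```bash
# Point the Ark server Deployment at the new patch image.
kubectl set image deployment/ark ark=gcr.io/heptio-images/ark:v0.10.1 -n heptio-ark
```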

Breaking changes are documented in the release notes and in the documentation.

## Breaking changes for version 0.10.0

- See [Upgrading to version 0.10.0][2]

## Ark versions and Kubernetes versions

Not all Ark versions support all versions of Kubernetes. You should be aware of the following known limitations:

- Ark version 0.9.0 requires Kubernetes version 1.8 or later. In version 0.9.1, Ark was updated to support earlier versions.
- Restic support requires Kubernetes version 1.10 or later, or an earlier version with the mount propagation feature enabled. See [Restic Integration][3].

[1]: https://github.com/heptio/ark/releases
[2]: upgrading-to-v0.10.md
[3]: restic.md

15 docs/zenhub.md Normal file
@@ -0,0 +1,15 @@

# ZenHub

As an Open Source community, it is necessary for our work, communication, and collaboration to be done in the open. GitHub provides a central repository for code, pull requests, issues, and documentation. When applicable, we will use Google Docs for design reviews, proposals, and other working documents.

While GitHub issues, milestones, and labels generally work pretty well, the Heptio team has found that product planning requires some additional tooling that GitHub projects do not offer.

In our effort to minimize tooling while enabling product management insights, we have decided to use [ZenHub Open-Source](https://www.zenhub.com/blog/open-source/) to overlay product and project tracking on top of GitHub. ZenHub is a GitHub application that provides Kanban visualization, Epic tracking, fine-grained prioritization, and more. Its primary backing storage system is existing GitHub issues, along with additional metadata stored in ZenHub's database.

If you are an Ark user or Ark developer, you do not _need_ to use ZenHub for your regular workflow (e.g., to see open bug reports or feature requests, or to work on pull requests). However, if you'd like to be able to visualize the high-level project goals and roadmap, you will need to use the free version of ZenHub.

## Using ZenHub

ZenHub can be integrated within the GitHub interface using their [Chrome or Firefox extensions](https://www.zenhub.com/extension). In addition, you can use their dedicated [web application](https://app.zenhub.com/workspace/o/heptio/ark/boards?filterLogic=all&repos=99143276).

@@ -1,12 +1,13 @@

# Examples

The YAML config files in this directory can be used to quickly deploy a containerized Ark deployment.
This directory contains sample YAML config files for running Ark on each core provider. Starting with v0.10, these files are packaged into [the Ark release tarballs][2], and we highly recommend that you use the packaged versions of these files to ensure compatibility with the released code.

* `common/`: Contains manifests to set up Ark. Can be used across cloud provider platforms. (Note that Azure requires its own deployment file due to its unique way of loading credentials).

* `minio/`: Used in the [Quickstart][1] to set up [Minio][0], a local S3-compatible object storage service. It provides a convenient way to test Ark without tying you to a specific cloud provider.

* `aws/`, `azure/`, `gcp/`: Contains manifests specific to the given cloud provider's setup.
* `aws/`, `azure/`, `gcp/`, `ibm/`: Contains manifests specific to the given cloud provider's setup.

[0]: https://github.com/minio/minio
[1]: /README.md#quickstart
[2]: https://github.com/heptio/ark/releases

@@ -1,31 +0,0 @@

# Copyright 2017 Heptio Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

---
apiVersion: ark.heptio.com/v1
kind: Config
metadata:
  namespace: heptio-ark
  name: default
persistentVolumeProvider:
  aws:
    region: <YOUR_REGION>
backupStorageProvider:
  bucket: <YOUR_BUCKET>
  aws:
    region: <YOUR_REGION>
backupSyncPeriod: 30m
gcSyncPeriod: 30m
scheduleSyncPeriod: 1m
restoreOnlyMode: false

26 examples/aws/05-ark-backupstoragelocation.yaml Normal file
@@ -0,0 +1,26 @@

# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

---
apiVersion: ark.heptio.com/v1
kind: BackupStorageLocation
metadata:
  name: default
  namespace: heptio-ark
spec:
  provider: aws
  objectStorage:
    bucket: <YOUR_BUCKET>
  config:
    region: <YOUR_REGION>

24 examples/aws/06-ark-volumesnapshotlocation.yaml Normal file
@@ -0,0 +1,24 @@

# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

---
apiVersion: ark.heptio.com/v1
kind: VolumeSnapshotLocation
metadata:
  name: aws-default
  namespace: heptio-ark
spec:
  provider: aws
  config:
    region: <YOUR_REGION>

50 examples/aws/10-deployment-kube2iam.yaml Normal file
@@ -0,0 +1,50 @@

# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  namespace: heptio-ark
  name: ark
spec:
  replicas: 1
  template:
    metadata:
      labels:
        component: ark
      annotations:
        iam.amazonaws.com/role: arn:aws:iam::<AWS_ACCOUNT_ID>:role/<HEPTIO_ARK_ROLE_NAME>
        prometheus.io/scrape: "true"
        prometheus.io/port: "8085"
        prometheus.io/path: "/metrics"
    spec:
      restartPolicy: Always
      serviceAccountName: ark
      containers:
        - name: ark
          image: gcr.io/heptio-images/ark:latest
          ports:
            - name: metrics
              containerPort: 8085
          command:
            - /ark
          args:
            - server
          volumeMounts:
            - name: plugins
              mountPath: /plugins
      volumes:
        - name: plugins
          emptyDir: {}

64 examples/aws/10-deployment.yaml Normal file
@@ -0,0 +1,64 @@

# Copyright 2017 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  namespace: heptio-ark
  name: ark
spec:
  replicas: 1
  template:
    metadata:
      labels:
        component: ark
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "8085"
        prometheus.io/path: "/metrics"
    spec:
      restartPolicy: Always
      serviceAccountName: ark
      containers:
        - name: ark
          image: gcr.io/heptio-images/ark:latest
          command:
            - /ark
          args:
            - server
            ## uncomment following line and specify values if needed for multiple provider snapshot locations
            # - --default-volume-snapshot-locations=<provider-1:location-1,provider-2:location-2,...>
          volumeMounts:
            - name: cloud-credentials
              mountPath: /credentials
            - name: plugins
              mountPath: /plugins
            - name: scratch
              mountPath: /scratch
          env:
            - name: AWS_SHARED_CREDENTIALS_FILE
              value: /credentials/cloud
            - name: ARK_SCRATCH_DIR
              value: /scratch
            #- name: AWS_CLUSTER_NAME
            #  value: <YOUR_CLUSTER_NAME>
      volumes:
        - name: cloud-credentials
          secret:
            secretName: cloud-credentials
        - name: plugins
          emptyDir: {}
        - name: scratch
          emptyDir: {}

69 examples/aws/20-restic-daemonset.yaml Normal file
@@ -0,0 +1,69 @@

# Copyright 2018 the Heptio Ark contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: restic
  namespace: heptio-ark
spec:
  selector:
    matchLabels:
      name: restic
  template:
    metadata:
      labels:
        name: restic
    spec:
      serviceAccountName: ark
      securityContext:
        runAsUser: 0
      volumes:
        - name: cloud-credentials
          secret:
            secretName: cloud-credentials
        - name: host-pods
          hostPath:
            path: /var/lib/kubelet/pods
        - name: scratch
          emptyDir: {}
      containers:
        - name: ark
          image: gcr.io/heptio-images/ark:latest
          command:
            - /ark
          args:
            - restic
            - server
          volumeMounts:
            - name: cloud-credentials
              mountPath: /credentials
            - name: host-pods
              mountPath: /host_pods
              mountPropagation: HostToContainer
            - name: scratch
              mountPath: /scratch
          env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: HEPTIO_ARK_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: AWS_SHARED_CREDENTIALS_FILE
              value: /credentials/cloud
            - name: ARK_SCRATCH_DIR
              value: /scratch

Some files were not shown because too many files have changed in this diff.