diff --git a/changelogs/unreleased/8085-Lyndon-Li b/changelogs/unreleased/8085-Lyndon-Li new file mode 100644 index 000000000..f063cdfc1 --- /dev/null +++ b/changelogs/unreleased/8085-Lyndon-Li @@ -0,0 +1 @@ +According to design #7576, after node-agent restarts, if a DU/DD is in InProgress status, re-capture the data mover ms pod and continue the execution \ No newline at end of file diff --git a/changelogs/unreleased/8093-Lyndon-Li b/changelogs/unreleased/8093-Lyndon-Li new file mode 100644 index 000000000..a43c47e09 --- /dev/null +++ b/changelogs/unreleased/8093-Lyndon-Li @@ -0,0 +1 @@ +Fix issue #7620, add backup repository configuration implementation and support cacheLimit configuration for Kopia repo \ No newline at end of file diff --git a/changelogs/unreleased/8096-Lyndon-Li b/changelogs/unreleased/8096-Lyndon-Li new file mode 100644 index 000000000..9c0e2dd0d --- /dev/null +++ b/changelogs/unreleased/8096-Lyndon-Li @@ -0,0 +1 @@ +Fix issue #8072, add the warning messages for restic deprecation \ No newline at end of file diff --git a/config/crd/v1/bases/velero.io_backuprepositories.yaml b/config/crd/v1/bases/velero.io_backuprepositories.yaml index d5cc0c51b..00818bc5e 100644 --- a/config/crd/v1/bases/velero.io_backuprepositories.yaml +++ b/config/crd/v1/bases/velero.io_backuprepositories.yaml @@ -54,6 +54,13 @@ spec: description: MaintenanceFrequency is how often maintenance should be run. type: string + repositoryConfig: + additionalProperties: + type: string + description: RepositoryConfig is for repository-specific configuration + fields. + nullable: true + type: object repositoryType: description: RepositoryType indicates the type of the backend repository enum: diff --git a/config/crd/v1/crds/crds.go b/config/crd/v1/crds/crds.go index 8722e3686..108949343 100644 --- a/config/crd/v1/crds/crds.go +++ b/config/crd/v1/crds/crds.go @@ -29,7 +29,7 @@ import ( ) var rawCRDs = [][]byte{ - []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xbcVMo\xdc6\x10\xbd\xef\xaf\x18\xa4\xd7J\x9b\xa0=\x14\xba%n\v\x04M\x02cm\xf8>\x92fw\x19S$K\x0e\xd7\xdd~\xfc\xf7bHɫ\x95do\xec\x02\xd5M\xc3\xe1㛯G\x16E\xb1B\xa7\xee\xc8\aeM\x05\xe8\x14\xfd\xc1d\xe4/\x94\xf7?\x85R\xd9\xf5\xe1\xdd\xea^\x99\xb6\x82\xab\x18\xd8v\x1b\n6\xfa\x86~\xa6\xad2\x8a\x955\xab\x8e\x18[d\xacV\x00h\x8ce\x14s\x90_\x80\xc6\x1a\xf6Vk\xf2ŎLy\x1fk\xaa\xa3\xd2-\xf9\x04>\x1c}x[\xbe\xfb\xb1|\xbb\x020\xd8Q\x0556\xf7\xd1yr6(\xb6^Q(\x0f\xa4\xc9\xdbR\xd9Up\xd4\b\xfa\xce\xdb\xe8*8-\xe4\xdd\xfdə\xf5\x87\x04\xb4\x19\x80\x8eiI\xab\xc0\xbf-.\x7fR\x81\x93\x8b\xd3ѣ^\"\x92\x96\x832\xbb\xa8\xd1\xcf\x1c\xe4\x80\xd0XG\x15|\x11.\x0e\x1bjW\x00}\xa4\x89[\x01ض)w\xa8\xaf\xbd2L\xfe\xca\xea\xd8\r9+\xe0k\xb0\xe6\x1ay_A9d\xb7l<\xa5\xc4ު\x8e\x02c\xe7\x92\uf430\xf7;\xea\xff\xf9(\x87\xb7\xc84\a\x93̕'\xae\xb7GGg(\xa7D\xc0h-#\x06\xf6\xca\xecV'\xe7û\x9c\x8afO\x1dV\xbd\xafud\xde_\x7f\xbc\xfb\xe1\xe6\xcc\f\xe0\xbcu\xe4Y\r\xe5\xc9ߨ\xfdFV\x80\x96B\xe3\x95\xe3\xd4\x1c\x7f\x17gk\x00r@\xde\x05\xad\xf4!\x05\xe0=\r9\xa6\xb6\xe7\x04v\v\xbcW\x01<9O\x81L\xeeL1\xa3\x01[\x7f\xa5\x86\xcb\t\xf4\ry\x81\x81\xb0\xb7Q\xb7Ҿ\a\xf2\f\x9e\x1a\xbb3\xea\xcfG\xec\x00lӡ\x1a\x99\x02C\xaa\xa2A\r\aԑ\xbe\a4\xed\x04\xb9\xc3#x\x923!\x9a\x11^\xda\x10\xa6<>[O\xa0\xcc\xd6V\xb0gv\xa1Z\xafw\x8a\x87\xa1ll\xd7E\xa3\xf8\xb8N\xf3\xa5\xea\xc8ևuK\a\xd2\xeb\xa0v\x05\xfaf\xaf\x98\x1a\x8e\x9e\xd6\xe8T\x91\x021i0ˮ\xfd\xce\xf7c\x1cΎ\x9d\x15:\x7fi\x92^P\x1e\x19-P\x01\xb0\x87\xca!\x9e\xaa 
&I\xdd旛[\x18\x98\xe4J墜\\gy\x19\xea#\xd9TfK>\xef\xdbz\xdb%L2\xad\xb3\xcap\xfai\xb4\"\xc3\x10b\xdd)\x966\xf8=R`)\xdd\x14\xf6*\t\x17\xd4\x04\xd1\xc9\xe8\xb4S\x87\x8f\x06\xae\xb0#}\x85\x81\xfe\xe7ZIUB!E\xf8\xa6j\x8d\xe5x\xea\x9c\xd3;Z\x18\xa4\xf4\x89\xd2N\xe5\xf1\xc6Q#\x95\x95\xe4\xcaV\xb5UM\x9e\xa9\xad\xf5\x803\xff\xf3L-K\x80|YDo\xd8z\xdc\xd1'\x9b1\xa7N\x97\xdaN\xbe\x0fK@\x03c\x91\xad\xac\t\xb4\xec\xb8\x00\xc8{\xe4\x91\x180*\xf3\xa8)\x8bA>S\x99T\x1d\x14\xa50h\x1a\xfa5\xf5\xa3i\x8e\x17\x02\xfd\xbc\xb0EB\xda\xdb\a\xb0[&3\x06\xed\xb9.DR\x13\xf8h^D\xf6\xfc\xa6\xb8@ss\xe6\fʴ\xd2\x1b\xbd4\xcb!C\xea\xa5\xd8dZ\xf0\xe7\x97\xf2\xf8#\x13\xbb\xf9q\x05\xdc[\xa7p\xc1\xee)\xb0j\x16\x16\u07bcyY\xbc\x02\xf3\xb1\x95\xe1\xdb*\xf2\xaf\xe9\xc0\xcd\x04ch\xbemԺ?\xa0hl\xe7\x90U\xadiPH\x19\x1f\x95\xf7\x1c\xe7\xbc\x12\xed\xff\xd0t\ay]\xd0\xe3{\xe45aݝC\x8cG*\x1b\x12\xbf<\xc7#\x9a\xc3̄\x05Hg۞Y\xbf/H\x1a^\x10\x98\f\x83\xf24\xb9\x9b\x8ae5\x99\xf8,\xcd\xe1\xc4e\xda\r\x93\xe5IR\xbfIm\x199\x86\x97\xe8m\xda0$\xbb\x89ާ\xfb,[\xe5\x19\xf3j\xc5\xd5\x18x$,\xf2\xa8\xbc\xd0\x16\x9f\xe6;\x06b\x02\x06,\x86\xb1\x12=\xe0R\xd5\x175hk}\x87\x9c_\xad\x85\x00\xcdg\xaf\xfcl\xe9\xb7\x00\xd66\xf2\x13\xa9\xe7\xfd\x9c\x05\\(\xc7\x05\xa6n\x8f\xe1\x12\xcfk\xf1Yj\x88\xc9\xcd\xf6\x1c\x85\xa7\xd4\xf5\v=,X7\x84\xed\\\xa1\v\xf8byy\xe9\xc9\b\x17\xa7bf\f\xf2\xc2kGu\x0ey\x90ǖX?>`+\xf8\xeb\x9fտ\x01\x00\x00\xff\xff\xdd}\xa6m\xca\x0e\x00\x00"), + []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xbcVMo\xe36\x10\xbd\xfbW\f\xb6\xd7Jޠ=\x14\xba\xed\xba-\x104\t\x02'ȝ\x92F27\x14ɒC\xa7\xee\xc7\x7f/\x86\x94bY\x92כ\x14\xa8n\"\x87o>\xde\xcc#\xb3,[\t+\x9f\xd0yit\x01\xc2J\xfc\x83P\xf3\x9fϟ\x7f\xf2\xb94\xeb\xfd\xd5\xeaY꺀M\xf0d\xba-z\x13\\\x85?c#\xb5$i\xf4\xaaC\x12\xb5 Q\xac\x00\x84ֆ\x04/{\xfe\x05\xa8\x8c&g\x94B\x97\xb5\xa8\xf3\xe7Pb\x19\xa4\xaa\xd1E\xf0\xc1\xf5\xfec~\xf5c\xfeq\x05\xa0E\x87\x05\x94\xa2z\x0e֡5^\x92q\x12}\xbeG\x85\xce\xe4Ҭ\xbcŊ\xd1[g\x82-ฑN\xf7\x9eSԟ#\xd0v\x00:\xc4-%=\xfd\xb6\xb8}#=E\x13\xab\x82\x13j)\x90\xb8\xed\xa5n\x83\x12nf\xc0\x0e|e,\x16pDZXQa\xbd\x02\xe83\x8d\xb1e \xea:\xd6N\xa8{'5\xa1\xdb\x18\x15\xba\xa1f\x19|\xf1F\xdf\v\xda\x15\x90\x0f\xd5\xcd+\x87\xb1\xb0\x8f\xb2CO\xa2\xb3\xd1v(ا\x16\xfb\x7f:\xb0\xf3Z\x10\xce\xc1\xb8r\xf91\xd6ǃ\xc5\x13\x94c!`\xb4\x97\x10=9\xa9\xdb\xd5\xd1x\x7f\x95JQ\xed\xb0\x13Eok,\xeaO\xf7\xd7O?<\x9c,\x03Xg,:\x92\x03=\xe9\x1b\xb5\xdfh\x15\xa0F_9i)6\xc7\xdf\xd9\xc9\x1e\x00;H\xa7\xa0\xe6>D\x0f\xb4á\xc6X\xf71\x81i\x80v҃C\xebУN\x9d\xc9\xcbB\x83)\xbf`E\xf9\x04\xfa\x01\x1dÀߙ\xa0jn\xdf=:\x02\x87\x95i\xb5\xfc\xf3\x15\xdb\x03\x99\xe8T\tBO\x10Y\xd4B\xc1^\xa8\x80߃\xd0\xf5\x04\xb9\x13\ap\xc8>!\xe8\x11^<\xe0\xa7q\xdc\x1a\x87 
uc\n\xd8\x11Y_\xac\u05ed\xa4a(+\xd3uAK:\xac\xe3|\xc92\x90q~]\xe3\x1e\xd5\xda\xcb6\x13\xae\xdaI\u008a\x82õ\xb02\x8b\x89\xe88\x98yW\x7f\xe7\xfa1\xf6'ngD\xa7/N\xd2\x1b\xe8\xe1\xd1\x02\xe9A\xf4P)\xc5#\v\xbcĥ\xdb\xfe\xf2\xf0\bC$\x89\xa9D\xca\xd1tV\x97\x81\x1f\xae\xa6\xd4\r\xbat\xaeq\xa6\x8b\x98\xa8kk\xa4\xa6\xf8S)\x89\x9a\xc0\x87\xb2\x93\xc4m\xf0{@OL\xdd\x14v\x13\x85\vJ\x84`yt\xea\xa9\xc1\xb5\x86\x8d\xe8Pm\x84\xc7\xff\x99+f\xc5gL\xc27\xb15\x96\xe3\xa9q*\xefhc\x90\xd23\xd4N\xe5\xf1\xc1b\xc5\xccrq\xf9\xa8ld\x95f\xaa1\x0e\xc4\xcc\xfe\xb4R\xcb\x12\xc0_\x12\xd1\a2N\xb4xc\x12\xe6\xd4\xe8R\xdb\xf1\xf7y\th\x88\x98e+i\x02.\x1b.\x00\xd2N\xd0H\fHH\xfd\xaa)\x8bI~\x85\x99Ȏ`\xa5\xd0BW\xf8k\xecG]\x1d.$z\xbbp\x84Sڙ\x170\r\xa1\x1e\x83\xf6\xb1.dR\"\xb8\xa0\xdf\x14\xec1Ǎэl灎/\xb2s\xe4^p2\xc9v;\xf1ərs\x1dcɆ\xcecB\x1a\xd9\x06w\x8e\xbcF\xa2\xaag\x12\x02\xa0\x83R\xa2TX\x00\xb9\x80g*2\x9b\x95ӊ\xf0\xfdx\x81\xb8\xed\x891H]\xf3\xb4\xf4\x97\x15;\x19\x9a\x91\xdb\x1fu\r\xee\xf4\x992\xfeP\x87n\xee.\x83gc\xa5XXw\xe8IV\v\x1b\x1f>\xbc\xad\x03\x18\xe6\xbaf9j$\xba\xf7\xcc\xe4v\x821\x8cc\x13\x94\xea\x1dd\x95\xe9\xac Y*\x1c\xee\f\xe6\\\xa63\x87\xa5\xa6\x81\xff4\x86{~o\xe1\xeb\v\xed=i=\x9dB\x8cE&-\xc4\xf8\x92\xb2\x8d\xc2\x1cT\xc4/@ZS\xf7\x91\xf5\xe7b\xeb\xbf!1\x96\a\xe9pr[g\xcb\xfa:\xb1YR\xa6\x89ɴ\x1b&ۓ\xa2~\xd3\xfdC\x82\x82\x7f\xcb\r\x14\x0f\fŮ\x82s\xf1\x86O\xab\xfc\xb0{\xf7\x1d\xa4\x84\xa7\x91\xd4\xf23\xfbB[\xdc\xccO\f\x811\x18\x10/\x8c\xb5\xf9E,\xb1\xbe\xa8ʍq\x9d\xa0\xf4\x8e\xcf\x18\xe8}\"\xb6|\a\xa1\xf7\xa2\xbd\x94\xddm\xb2J\x0f\xb9\xfe\b\x88\xd2\x04:Sz\xdaͣ\x80\vt\\\x88\xd4\ue13f\x14\xe7=\xdb,5\xc4\xe4\xae\xffZ\b\xe7\xd4\xf5\x0e_\x16V\xb7(\xea\xb9Bgpghy\xebl\x86\x8bS1[\xf4\xfc\xe6\xadG<\xfb4\xc8\xe3\x95P\xbe>\xe9\v\xf8\xeb\x9fտ\x01\x00\x00\xff\xff\x12%\xb58\xdc\x0f\x00\x00"), []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xec}_s\xdb8\x92\xf8{>\x05ʿ\x87\xd9ݒ\xecI\xfd\xf6\xe1\xcao\x19'\xb9Q\xedL\xe2\x8a=\xd9g\x88lI\x18\x83\x00\x17\x00ek\xef\xee\xbb_\xa1\x01\xf0\x8f\b\x92\xa0,{\xb2{\xe1Kb\x11l\x00ݍ\xeeFw\xa3\xb1\\.\xdfВ}\x05\xa5\x99\x14ׄ\x96\f\x9e\f\b\xfb\x97\xbe|\xf8\x0f}\xc9\xe4\xd5\xfe\xed\x9b\a&\xf2krSi#\x8b/\xa0e\xa52x\x0f\x1b&\x98aR\xbc)\xc0М\x1az\xfd\x86\x10*\x844\xd4\xfe\xacퟄdR\x18%9\a\xb5܂\xb8|\xa8ְ\xae\x18\xcfA!\xf0\xd0\xf5\xfe\xc7˷\x7f\xbd\xfc\xf1\r!\x82\x16pM\xd64{\xa8J}\xb9\a\x0eJ^2\xf9F\x97\x90Y\x90[%\xab\xf2\x9a4/\xdc'\xbe;7ԟ\xf0k\xfc\x813m\xfe\xd6\xfa\xf1\x17\xa6\r\xbe(y\xa5(\xaf{\xc2\xdf4\x13ۊS\x15~}C\x88\xced\t\xd7\xe4\x93\xed\xa2\xa4\x19\xe4o\b\xf1\xa3\xc6.\x97~\xc0\xfb\xb7\x0eB\xb6\x83\x82\xba\xb1\x10\"K\x10\xefnW_\xff\xff]\xe7gBrЙb\xa5\xc1\xb9\xff\xf7\xb2\xfe\x9d\xf8Q\x12\xa6\t%_q\x8eDy\x94\x13\xb3\xa3\x86((\x15h\x10F\x13\xb3\x03\x92\xd1\xd2T\n\x88ܐ\xbfUkP\x02\f\xe8\x16\xbc\x8cWڀ\"\xdaP\x03\x84\x1aBI)\x990\x84\tbX\x01\xe4O\xefnWD\xae\x7f\x87\xcchBEN\xa8\xd62c\xd4@N\xf6\x92W\x05\xb8o\xff|YC-\x95,A\x19\x16\x90\xee\x9e\x16'\xb5~\x1d\x9b\xab},z\xdcW$\xb7,\x05nZ\x1eŐ{\x8c\xda\xf9\x99\x1d\xd3\xcd\xf4\x91\xc9\xec\xcfT\xf8\xe1_\x1e\x81\xbe\x03e\xc1\x10\xbd\x93\x15\xcf-'\xeeAY\x04fr+\xd8?kؚ\x18\x89\x9drj@[\xcc\x18P\x82r\xb2\xa7\xbc\x82\x85E\xca\x11\xe4\x82\x1e\x88\x02\xdb'\xa9D\v\x1e~\xa0\x8f\xc7\xf1\xabT@\x98\xd8\xc8k\xb23\xa6\xd4\xd7WW[f\xc2\xfa\xcadQT\x82\x99\xc3\x15.\x15\xb6\xae\x8cT\xfa*\x87=\xf0+ͶK\xaa\xb2\x1d3\x90Y2_ђ-q\"\x02\xd7\xd8e\x91\xff\xbf\xc0\x1e\xbaӭ9X\xb6\xd5F1\xb1m\xbd\xc0\xf51\x83\xe7\f\x84!\xbaZ\x17\xccX6\xf8G\x05ڮ\x01y\f\xf6\x06e\x10Y\x03\xa9\xcaܲ\xf1q\x83\x95 
7\xb4\x00~C5\xbc2\xad,U\xf4\xd2\x12!\x89Zm\xc9z\xdcء\xb7\xf5\"\b\xc8\x01\xd2:\xc1rWB\xd6Yh\xf6+\xb6a\x99[N\x1b\xa9\x1a\xb9\xe3d`\x17C\xf1\xa5o\x9fL\xb3;AK\xbd\x93\xe6\x9e\x15 +s\xdcb\x8aאxw\xab#(a\x84~\xbc(\xb3*\r\xb9]\xb4\x8f\x94\x19\x1c\xf3\xcd݊|Ea\x15\xbeF\xa1Uib*%,\x97D\xfa\xfa\x024?\xdc\xcb\xdf4\x90\xbcB\xe6\xce\x14 \x1e\x16d\r\x1b\xcb\t\n\xec\xf7\xf6\x15(eq\xa3q\x00\xb2\xea\t\x1b\xfb\xdc\xef\xc0\xe2\x96V\xdc\xf8u\xc24y\xfb#)\x98\xa8L\x8f\xd5\x06\xa9\x8e\x98\xa2\x86\x16r\x0f\xea\x14$\xbe\xa7\x86\xfej?>\u009d\x05J\x10\xaaE\xde\xda\xe3q}\xc0\x971j\xbbg\xb5iAd\x9a\\\\\x10\xa9ȅ\xd3\xc0\x17\v\xf7uŸY2\xd1\xee\xe3\x91q\x1ez\x997y\x87CGP}/?jǼ'\xe1b\x00V\v5\x8f;0;P\xa4\x94\xb5\xc6\xdb0\x0eD\x1f\xb4\x81\xc2#&h\x11?\x9fHO\xb8v8\xf7 \xb4ū\x9fH\x7f\xf2\xa2✮9\\\x13\xa3*\x18\xc0\xcdZJ\x0eTL \xe7\vhòs\xa0\xc6A\x8a F\xf9\x17\x1d\f\xa0Ҥ\x0f@h\x04\xb4Ǚ\xd5Μ\xb7\x10\xdb\xc5ʛ\xe8\xa0J\x05\x99\x15\xdb\xd7^\x1d0ਂ\x84$\\\x8a-(\u05fd5U\x02\x87)\xb0\x1c\x97\x13+i\x15p\xabNȦ\xb2B\xf8\x92\xd8\xe5=\xc8\x04Lh\x034\u009d\xcf \x10\x8cB\xf4꙳\f\xad@o\xf0-\xd1p\x8d\xf1i\xa3\xa5\x0f%8\xdb\xd9\xd2\xd2\x0f\xbbQ\xbf\xa3\x02A\x83\xb1\x1f]\xfc\xe5b\x81$\xee\xf6\xda\xedC\x13\xaa\xa0FK\xb2\xe0\x84\xa24\x87~kf\xa0\x88`qT\xa0$ғ*E\x0f\x03Ԭ7\x00g\xa4\xe7\x10\xcc#\x8a\x8a\xd0\xec\x95iz\xdc\xef\xbf3U\xcfCG\x8d\xdb]ʄ\xa5\x9f\xddyvȧ\xdd\x06\u03a2MH\x13\x81DŽ\x83\x87{\xb3\x11j\xfdA\xc8:\v\xcf\x0f1y\xcd[\x9ey\xff%1\xb5\x93\xf2a\n;?\xdb6ͮ\x88d\xe8V!k\xd8\xd1=\x93\xcaO\xbdѵ\xf0\x04Ye\xa2\xab\x9e\x1a\x92\xb3\xcd\x06\x94\x85S\xee\xa8\x06\xed\xf6\xc9\xc3\b\x19\xb6\xdfIK\x8cD_\x1eͣ!\xa4%\x13\xce|h\xe8\u05908֒\xe1\xb1\x03\xb5\xf65*\xe3\x9c\xedY^Q\x8ez\x99\x8a\xcc͇\xd6\xe3\x8aI\x99\x11\"\xf7\xc6\x1c\xe5L\xf78\x83 L\xca\x12\xa9\xb3U\x92\x02\xac\xd1[\xd8MA\xbf\xe9\xf0\xcc\xd7\xd4\xda*rh\xf6\x04\x89\xa5*\x0e\xdaw\x95\xa3\x1d\xd9ȌEC\x14\xf4D\x10N\xd7\xc0\x89\x06\x0e\x99\x91*\x8e\x91):\xbb'E\b\x0e 2\"\xf9\xba[\x8df\x02# \t\xee\xe1v,\xdb9S\xcf2\x11\xc2!\xb9\x04k\xf0\x19B˒G\xd4E\xf3\x8c\x12\xdfw2\xb6֛gb\xd5\x1fË\xad\xff\xe6I\x90\x99\xcd\x13Em\xb3\xbe\xba\x98\xad\xd9!\xbe\xa9m\x9e\x7fO\xc4\x06\xc9\x7f\x02ӎ\xac~\x82n\xa1d\x9e\x1e\xe4[\x8bU\x06\xfaҚSh\xe9,\b3\xe1ש\x95б\xb9z\u07b2\x0e\x12\xbem\xda\xccg\xfaDҤ\xac\x89\x17\"L\xddſ ]Pe\xdcy\x8d\x91L\x93_\xda_-\b\xdb\xd4H\xcf\x17dø\x01u\x84\xfd\x93D}\xa0\xcc9\x90\x91\xa2\xf5\b\xfa\xefM\xb6\xfb\xf0dM0݄\xaa\x12\xf1r\xfc\xb13d\x83\xb5\xdfU\xcf\x13p\t\xfa\xb1\x99\x82\x02\xfd\xe3\xb8cj\xff\x82\xa6ջO\xef\xe3\xfb\xab\xf6\x93\xc0y\xbd\x89L,:\xf7\xbc;\x9aQ{|ބ\x0fo\xd0\x06\xaa7@.\x16\xb2 \x94<\xc0\xc1\x99.T\x10K\x1f\x1a\x1a't\xaf\x00\x832\xc8g\x0fp@0\xf1(K\xffI\xe5\x06\xf7<\xc0!\xa5\xd9\x11\x0e혘\xf6\xd1#\x8b'\xfb\x03\"\x02\x9d\xeb\xa9l\xe0\x1e\xbf\x14\"1\x8d\xf8\x93(K\xc2\x13p\x7f\xc24\x93X\xa5\xddG;L\x89\x1c\xf0\x83v\xb4\xb4+f\xc7J\x14\xab\xe8q\x90\x9bd\x82\xba\xe7+\xe5,\xaf;rkd%\x16\xe4\x934\xf6\x9f\x0fOL\xfbH\xe6{\t\xfa\x934\xf8ˋ`\xd4\r\xfc%\xf1\xe9z\xc0\x85&\x9c\x94\xb7\bk\xc7\xe2\x9cN\xb3\xdcV\xe3\x9ei\xb2\x12v\xbb\xe2P\x92\xd8\x15\x86]]w\xae\xa3\xa2\xd2\x18F\x13R,\x9d\xdb&֓ǷT\x1dt?\xbbS\xdf\xe1\xbdU\x16\xee\x8d\v\xfer\x9aA\x1e\xe25\x18\x95\xa4\x06\xb6,K\xec\xaf\x00\xb5\x05RZ\x11\x9e\xc6\x11\x89\x82\xd5\xcff\x1e\xfb\xa4i\xef\xf0x\xc1\x9bO\x0ffi\x17\\B\xab@\xc6ɦ\x03\x11\xc7\xe1\xa6\xd33B-\x8a&\xc6$vi\x9ec\xa2\t\xe5\xb73$\xfa\fZ\xcc]\x9a\xad\xb1;\x15XP\x8cu\xfc\x97\xd5t\xc8\xcd\xffCJʔ\xbe$\xef0\xa7\x84C\xe7\x9dwZ\xb5\xc0$t\x899!\x96\x05\xf6\x94[\xddk\x05\xa8 \xc0\x9d&\x96\x9b\x9e]\xb2 
\x8f;\xa9\x9dڬ\x83(\x17\x0fpp!\xbb\xc9.ۋ\xfcb%.\x9c\x0e\xef-\xd8Z\xe1K\xc1\x0f\xe4\x02\xdf]<ǔId\xb6\xc4fOˇ:-fY\xd0r\xe9\x19\xd4\xc8bDh`NO\xaa\xa1l7\x8c\xc1\b\xb0\x1fֹ*\xd6\xc8\x1d\x9bm\x12\x8b\x96RG\"\xe9\x03C\x99`\xde[\xa9\x8d\xf3Wul֨CK\x06'\x16\xa1\x1b\x97@$U\xc8\xf6\xb0Bq\xca\xf5\xda~\xeew\xa0\xc1\xc7\v\xbcc\xcc\x01\xb5;\xab\x8bf};i{\xe1\xe2\x15\xd8\t\xcd\xd0b\xc0oK%3\xd0\xd1`r\xf3$\xc8\xebHZD{\xee\xb5Ϗ\xba]\x8aˉ\x18wA\x86'\xdd䴈\x98i\xaf\x7fxj9$\xedڷ\x7fO\xf1\xd8\xdcq\x11\xcc\xd9+\nz\x9c'\x944\xc4\x1b\xf7eX\r\x1e\x903\xfeնBI\x90\xaaKk\x06\xfc\x16\x14u\xc1\xc4\n; oϮ\xd8I\x90\xa1\xb1l\x8f\xd8s\x9a)y\x13:i\xa8S\xff\xe0\x96r)\xd1U\xaf\xa0C\xbc\xbeW\x1b\xed@!M\xcb!0\xc3\xdc+e\xfe\x83&\x1b\xa6\xb4i\x0fA\x0f\xe4\x89D\xc1\xcc\xdc\xf8\x88\x0fJ\x9d\xb4\xef\xf9\xec\xbel\xb9\x9bv\xf21\xe4G9\xc4$\xce\x1c\xe3;@؆0C@d\xb2\x12\xe8@\xb1\xeb\x18\xbbp\xc8u\x12\x96\xa5.\x92\xb4\xd5o\x1f\x10U\x91\x86\x80%r\n\x13\xa3\x9e\x96v\xf3\x8f\x94\xf1\x97 \x9b\x19J#\x8b=\xa7\xad\x89\x90c\xd6Έ+\xe8\x13+\xaa\x82\xd0\xc2\xd2\b\x959+\xa0K\xf4&\xf3\xcc~\x81j\xc2H\xbbbJ\x0e\x06|\xf6X\xe2\x182)4ˡV\xae\x9e\x11\xa4 \x94l(\xe3\x95J\x94\x80\xb3\xd0;g7\xe1%\xc1\xf9\xb6\ti\x9d/\x11\x15\t\xde\xd4D[q\\\x1a\x97*\xdd\xe2\x9b2\xb3\x14̷\xb2J\xc5$\xe6\xe5\x9d\xd9\xd0\xf2\x99\x8cT\x1c\xbe[Z\xa9C\xfdni\x8d=\xdf-\xad\x89绥\xf5\xdd\xd2Ji\xf9\xdd\xd2\xfani\xb5\x9f\xff\x13\x96\xd6Ԉ܁\xba\x81\x97\x93\xa3H\b\x15\x8f\rq\x04\xbeOn\xf09\xd8\xcfʅ\\\xc5AE2\xef\aҪcB\xabQ\x1eur\xa4]5\x81\xe7\xdd\xf9\x9e\tS\xf2\x19Y\xef\xa1\xd3\xf3e\xbd\xafF!\x9e)\xeb\xdd\x0f{\xda\xc6>)\xe7= e^v\xf4\xc2'J\x14@\x83[݅\xc1c\xf3\x1a␉\xfe_91\xb6\x97\xb5uF\xfex\xf1,\xfad\x1e\x89\x92\xf4\xe2/\x17\xdf\x1e\xfaσ\xf0A\x14\xf7q\xe7\x0f\x18G\xa0\xda\x1dh;-\xab\x9b\x05\xf7m\xb2\xf1Y\xf865\x13\xbeFb\x04V\x97%\x8f\xb0\xf8\xad\xca\x02\x03\xc5\xe7\xd2k\xa4g\x1c\x15]E\xe0$\x1d\x16\xa5\xfa 
\xb2\x9d\x92BV\xda{%,\xacw\x99;Q\x1e@Ƙ5\xba\xc2\xffJv\xb2\x8adb\x8f\xa0o\"#oz\xf2\x9d\xe4<\x1f\x84\x06C\xf7o/\xbbo\x8c\xf4\xa9z䑙]\x04\xd0\xe3\x0e\x04F\xd8Ŷ\x9d\x80\x1f\n\x02\xf8\x93\xf1\xc7\f\x16\x01$\x15\x11\x8c;Ϋ\xcb\t\xb4\xf9\x8e|.\x9d\xefi\xb6\xdd1\xeeSIK\xe6;9\x85\xaf\x9b\xa27`\x97\u038dv\x9f\xe5\xc8\xc2\x1f\x92\x9a7?!/\xc5#6\x91|wB\xca]bn\xef\xb3\xc3\xf3)Iusv\xcc/\x96@w\xfe\xb4\xb9$\xfcL\xa7\xc8\xcd\xc1\u038b\xa7ýb\x12\xdc뤾%&\xbc\x9d/s\xfd\x1c\x1e\x80\xe1\xf4\xb5ɤ\xb5I\x0f\xc1\xf8\xf8&\xd3\xd2\xe6$\xa3Mb,\x8d\xf5_-\xdd\xecՒ\xcc^7\xb5l\x94%F_\xceI\x1e\x8b\xd7j!\x93\n\x90\xbf\x16\xb3\x9d\x8a\x06\xa9:&\xe5I{\x9e\xcfG0,ჹ\xf5JvkQq\xc3J\x8e\xc1\xcd=ˣ\x0e\x00\xb3\x83C]T\xe2w\x89\xc71}y\x94\xcf_j\xae\xbd<\xb2\xbe\xa9&\x8f\xc09\xa1\xb1u՛y\xe6\xca\x13er\tVG\xd8\xd5\xe9\xabe\xf8\x9aF\v\xc7\xeex\xe2\x145M\x11s\xfbP1\\ZeP\x98\xa7ț\x9eU\xe9lc\xfc\xed\x1f\x15\xa8\x03\xc1\xe2.\xb5\xed\xd1\x1c\x8c\xf2\vS\xdb\xcdQ\x10\x15^l\r\xf9\xb4{\x86x\xb3\x94\xc9;\xe14\xe1\xf1x\xf0\x1b+#\x9a\x8d\x86\x15|v\x0f\x11\xedc\xe0s!\xeb\xaf#\x9fM\x19\xad\xa9'\x88^v\xdb1\x7f\xe31\xa9\xe9ӭ\xb1?\xe8d\xd0)'\x82҂\xf2\x93'\x80^j\x1b2\xb5\x11I\xb6\xbd\xd2N\xf8\xcc\v\xe0\xbd\xe0\x89\x9e\x978ɓ\x88\xa9\x94\x93;\xf3\xf0\xf4\n'u^\xf5\x84\xcek\x9d\xccI>\x91\x93\x94v\x92\x1c\x99MI\x1b\x99\x0e\x9e\x8e\x9f\xb4I8a\x93\x10V\x9d\x1ai\xc2I\x9ay'h\x12p\x98\xba4^\xf1\xa4\xcc+\x9e\x90y\xed\x931\x13L2\xf1z\xde\t\x98\x93\xdd\xfaR\xe5\xa0FC#\xa9\\8\xca\x7f){\x8d\xee@\x8eb\x02\xa14\x9dmձ_Q\\\xfbj\x98X\xf7t(\xc4g9\xad\xa5\xfd;\xf1\x9a\xc6\x1c\xe9\x1aw\xbe\x18\xaa\v\xe9h(\xa9\xc2\x02\xbb\xeb\x83K\xf9\x88\xaa\xca\x0f4\xdb\x1dA\xdfQM6R\x15Ԑ\x8b:Hv\xe5\x80ۿ/.\t\xf9(뼁v\xed\x18͊\x92\x1f쎁\\\xb4?8\x8d\x03\xa2\xdc\x16z\xbb\x95\x9ce\x11[*Z?\xc85\xee\x15t\xc0\xaaFY;\xac^چqS\nͮn\x9dƍ\xe4\\>\xce܋Ӓ\xfd'\x96\x97~\x86\xb7\xe6\xdd\xed\na\x04\xf6\xc0z\xd5u\x02S=\x9b5X5\xd9\xccsh\xed\xaf6\x1d\x88\xdd\\\xc0v\x05W\xc8]\xb1ޠ\xa6\xbd\xe8̤\x95.\xb7+7\x8e\xa1^,\xcfPq \x12\xb3N̎\xa9|YRe\x0e.\x99a\xd1\x19CP\x8bcޖA\xed\xd1/@\x1cEo\xa8;\x8cQ\xbcC\xd9\r\x8c\x1e\xe3\xee\x94q\f\x9f\xf0\x9b<\xdbw\xc6q\f[\x18K\xc4T\xe4\xe7hv\xd4ټXڗ\xcf\xfdU\xee\xe1}ԛ\xd5A\xcf\xddQ\xf3H\nS\x80\xe8*\xc3\x0efr\xae\x01\xab\xc6\xf6_=#')t\xed\xeb~\x9e⸺낈\xcc/\x94A\r\x9d\xc5\xe4\x13V)?\x90ۯ\xb8g\xaaE\x9b_\xa2~\xcf\x14\\W!`\x1a\x81\xe3?\xf8\xe9\xfc\xe9[\xdaHE\xb7\xf0\x8bt\x85\xa0\xa7\xc8\xdem\xdd)\x10\ueb5e\x90c\x19\x16M\xacJ\xac/I}\x04\xacɋ\xeeU\u07b5\xa3\x9cYK\xd8\x18~\n\xdd\xef\xef\x7fq\xb32\xac\x80\xcb\xf7\x95K\t\xb02Q\x83Eq\x98\xad\x83\xb4\xb6\xff\xdd\xc9G,P\x1b\xf7+\x86\xc2\xfe\xcdd\x14`B6\xa6\xe9͚RUrIsP7Rl\xd8vbv\xbfu\x1a\x1f\xa9\xd9\f\x7f\xf4\x93\xabuT\x80\x7f\xe68\xbd\xb5y8\a\xfe\x91q\xd0nX\t\x02\xf8\xb6\xffU-\x8f\xabb\xedl\xb8\x8d}Yw0\xa0\xe3ܴ\xd05\\\x82\xb2V\x94s\"W:\xf0\xea\xf0\xc4\x1b\x8a0a`\v\xfd\r݈\x04\xdew\n\x93\a>\x9f\x12G_\xe3_\xb5\xcc\xca\xd6Jsv\xa5\xdcD\x06>\x04\xa7u\xcd\xc3#3\xbe,\xd3y\xebh\x0em\x16\x86\n\xd8c\xc5\xf6\xe9\x12\xf6\xae\xb0\xbb\xbf\xf8\xc23r\xa5\xb0\b\xa6/\xfa\x8eE#O\xaab\xbf\xaeӁ\xea\xd4\"\xfd\xce\x18(J\x13\xd3\xd2ӂ\xe4\xa71\x80\xb5\x85#\r\xe5-~\xa6\xa1A\xccF\xd5\a\x91\x8d\xa5-\xf9uT8h\x83\x8c`\xa77\nir\xc2>\x99\x18D\x1eVz8\b3\x0f\x15\x9e\n>\xd7N\x1bZ\x9cT\x8f\xff\xa6\x0f\x06odQy+e\x8f\xd6c\xa7\xba!\x7fL,7\xe0ܗ\xb8=\xb1\xd0 
'\xb0\aA\xac^s(\x0eW\n̈́\xe2\xcfO:\xdd\x104Ep\"D\xef\x9d!\xdeO\xa0\xf1~\x93\x1ft\r\x133\x11\xf1\xba\x8a>\x12\xfaf\xa3\xdb\xe7_[\xbb\x19\x96\x16\xc4i\xf6^T6g\x9au\xf5\xc2\xf3\x84\xdc\xcd\xddj\b\xdc)\"\xae\x7f\x9b\xc73\x97q\x7f\xba\xcf\x12i\xfd\xe9\xce\x12h\x11\x885\x8f\x9f\x7f\xee\xb8\xd4O+ٍ_:\x83#\v'\xb4(\xe7\xfe\x18]\x01Z\xd3m\xa8\xd5\xfdh\x8d\xf6-\bp\x8e-\x17\x06\x88\x00m\xce\\u+U\xbb%C3SQ\xdfAH\x1fm\xb5\xfaA\x13.cP\xf1\xbe\x0e\x16.\x82\n\xbb\x99\x99\x88z*\x99J\xd9\xfd|\xa8\x1bZܠ\r\x89\xd4i\xae\xee\x02ζ\xcc\xee\x12,\xe5\xb6T\xad\xe9\x16\x96\x99\xe4\x1cPZ\xf7\xc7\xf5\x92kݟl\xfb\x02TON\xedc\xbb\xad\x8fe9j\xbb\x10.u\xc9\xd4x9\x93a\n\x9a{\xd2z\x03\x92\xd8\U0006c34d\xc3B\xf4\x12\xb1\xfeH\xdbmê\xf3b\xd9{H\xfd\x1db\v\xbf\xa3\x8e\xf3cA\x7f\x97jA\n&\xec?T\xe4.\x14\x15>\x9e5\xfe\x9d\x94\x0fw\x11#\xb67\xf8\x9f\xeb\x86M\x90\x80\t7l<\x8e\xb8\x96\x95\x8f#\xd7\x06m< \x81u\xd7ϼQC\x98#\xfa\xa07\x9dA_\xe8\xcf\x1dH\x93\xaa\xc0\xf5<\x00\xeb.\\T\xc5\xf9aq\f\xf9\xe8R\xbc\x06v\xab.\xbd7\x03\x9a\xd3\xee\x03\x1d\x85XN\x14H]V\xa1-\xd0O\xd9/z4\x0f\x19\x93=\x1c\xffܴ\x1e£\x1bf\xcb\xdc\x1b\x98`\xc7\b<\xefV\x17/!\x98`\xfe[ۦ>\x19\xdfڸ\x85|\xa7A\xffV\xfcd\xf5\x92|\x82\xbe\xa3\xdf\x1d\x96\x86\x1cs\fpUE\x9a\xacĭ\x92[\x05\xba\xcftK\xf2w\xca\f\x13ۏR\xdd\xf2j\xcb\xc4\xe7\xe1\x83!c\x8do\xa92\xcc2\xad\x1bOl\xa0LP\xce\xfe\x19\x93O\xed\x97Ӏn\x067JK\x920\x8c\xa1\x17\xef\xc1ڪ\x83\xfb\xfb\xa8(,=^O\xb1;\x02M\xa6dcm\x1346E\xe8\xf6\x92|\x92\xd1\x05\xee\x13tX\x17\xa65\xad@\x9b%l6R\x19\x17\xaf].\t\xdb\x04'\x82\x95\x1d\xe89rW\n\x12\x16\v\xb4֩\x0f\x8d\x1aB\xb7\xafBm\x8a\x05\xc7\vzp\xb1\x19\x9ae\x95\xb5\x94\xae\xb4\xa1\xc5[\x0e\xd6\xf4\xd2\x00\xdd\xedӬm\xf2\xfe\x8c~\xa3s:\x8d\xc2]\xd5\xe7\xf1\x9a\xec\xcf\xe8.z1_\xd1y\xa7\xfcH\xf1\xa6ۓV\xed\xdf\xfd\xb7\x11g\x91\a{nwQ\xcb[\x14\x06\xfe\xaa\xfe\xa2\xa8V\xea\xfd\x88r:oI\vߓ\xff\xe5\x7f\x03\x00\x00\xff\xff\xe1\xa0\x1ak\x81\x7f\x00\x00"), []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xccYI\x93b\xb9\x11\xbe\xf3+2b\x0es\xe9\aݶ\x0f\x0e.\x0e\x9a\xb2#:\\\xed\xaah\xca\xe5\xeb\b)\x01\rzҳ\x16h\xbc\xfcwGj\x81\xc7[\x1a\xaa\xed\x18\x8f.UOK*\xd7/3EUU\x13\xd6\xc8W\xb4N\x1a=\a\xd6H\xfc\xeaQӗ\x9b\xee\x7f\xef\xa6\xd2\xcc\x0e\x1f&{\xa9\xc5\x1c\x96\xc1yS\x7fAg\x82\xe5\xf8\x80\x1b\xa9\xa5\x97FOj\xf4L0\xcf\xe6\x13\x00\xa6\xb5\xf1\x8c\xa6\x1d}\x02p\xa3\xbd5J\xa1\xad\xb6\xa8\xa7\xfb\xb0\xc6u\x90J\xa0\x8d\xc4\xcbՇ\xf7\xd3\x0f\xbf\x9b\xbe\x9f\x00hV\xe3\x1c\u058c\xefC㼱l\x8b\xca\xf0Drz@\x85\xd6L\xa5\x99\xb8\x069ݰ\xb5&4s\xb8,$\n\xf9\xf6\xc4\xf9\xc7Hl\x95\x88=fbq]I\xe7\xff<\xbe\xe7Q:\x1f\xf75*X\xa6\xc6؊[\xdc\xceX\xff\x97\xcb\xd5\x15\xac\x9dJ+Ro\x83bv\xe4\xf8\x04\xc0q\xd3\xe0\x1c\xe2\xe9\x86q\x14\x13\x80\xac\x9aH\xad\x02&DT6S\xcfVj\x8fviT\xa8\xf5\xf9.\x81\x8e[\xd9\xf8\xa8\xcc$\vda\xa0H\x03\xce3\x1f\x1c\xb8\xc0w\xc0\x1c,\x0eL*\xb6V8\xfb\xabf\xe5\xffH\x0f\xe0gg\xf43\xf3\xbb9Lөi\xb3c\xae\xac&\x1b=\xb7f\xfc\x89\x04p\xdeJ\xbd\x1db\xe9\x919\xffʔ\x14\x91\x93\x17Y#H\a~\x87\xa0\x98\xf3\xe0i\x82\xbe\x92\x86\x80T\x84P4\x04G\xe6\xf2=\x00\x87D%\xeah\x98Sջ\xeb\x8amb\x05^;T\x12\xff4\x93\xb9o\x91-\xfe=\xe5\x16\xcf$\x9dgusEw\xb1\xc51bW\xaax\xc0\r\vʷE%+\xa9\xb6_^\x8b\xd5 \x9f\x8at\xea\xeaƇ\xab\xb9t\xeb\xda\x18\x85,QI\xbb\x0e\x1f\x92\x17\xf2\x1d\xd6l\x9e7\x9b\x06\xf5\xe2\xf9\xd3\xeboWW\xd30\xe4H\x9d\xa0 ñ\x96mvh\x11^c\xfc%\xbb\xb9,ڙ&\x80Y\xff\x8c\xdc_\x8c\xd8XӠ\xf5\xb2\x04K\x1a-,j\xcdvx\xfaWu\xb5\x06@b\xa4S \b\x940\xf9U\x8e\x1f\x14Yr0\x1b\xf0;\xe9\xc0bcѡN0E\xd3Lg\x06\xa7\x1d\xd2+\xb4D\x86b;(AXv@\xeb\xc1\"7[-\xffq\xa6\xed\xc0\x9b\xec\xcc\x1e\x9d\x87\x18\xa1\x9a)rր\xef\x80iѡ\\\xb3\x13X\xa4;!\xe8\x16\xbdx\xc0u\xf9\xf8L\xd1 
\xf5\xc6\xcca\xe7}\xe3\xe6\xb3\xd9V\xfa\x82\xd0\xdc\xd4u\xd0ҟf\x11l\xe5:xc\xddL\xe0\x01\xd5\xcc\xc9m\xc5,\xdfI\x8f\xdc\a\x8b3\xd6\xc8*\n\xa2\x13\xa4\xd6\xe2\a\x9b1\xdd]]\xdb\v\xe94\"\xa4\xbe\xc1<\x04\xaf\xc9e\x12\xa9$\xe2\xc5\n4E\xaa\xfb\xf2\xc7\xd5\v\x14N\x92\xa5\x92Q.[{z)\xf6!mJ\xbdA\x9b\xcem\xac\xa9#MԢ1R\xfb\xf8\xc1\x95D\xed\xc1\x85u-=\xb9\xc1\xdf\x03:O\xa6\xeb\x92]\xc6,\x06k\x84\xd0D\x90\xe8n\xf8\xa4a\xc9jTK\xe6\xf0\x17\xb6\x15Y\xc5Ud\x84\xbb\xac\xd5\xce\xcd\xdd\xcdI\xbd\xad\x85\x92SGL;\x88\x06\xab\x06\xf9U\xdc\tt\xd2Rdx\xe61FWGA\x19*Ɠr\x19\xc3 A\x83q\x8e\xce}6\x02\xbb+\x1d\x96\x17\xe7\x8dW<6hk\xe9bz\x85\x8d\xb1\xdd\xcc\xc3\xceH\xde\x1e\x05\xf1\xba\x06\a@\x1d\xea>#\x15|A&\x9e\xb4:\x8d,\xfd\xcdJ߿hĐ4\x12\x8b\xab\x93\xe6\xcfh\xa5\x117\x84\xff\xd8\xd9~V\xc1\xce\x1ca\x13\xfd_{u\"\xecr'\xcd\xfb\xa8]\xc6\xe2\xf9SA\xf0\x14[90\xb3\xae\xa6\xb0\xc8Am6\xf0\x1e\x84tTH\xb8H\xb4\xaf,\x1dT,4\xe6\xe0mx\x93\xf8\xdc\xe8\x8d\xdc\xf6\x85n\xd7Fc\x1es\x83tGs\xcbx\x13\xa1\x16yGc\xcdA\n\xb4\x15Ň\xdcH\x9e9\t6e\x90\x8dD%z\xd84\x1aeQ\x14\x8b\x82\x82\x9a\xa9\x1b6\\\x9e7\xc6J\x9aI\x9d<\xf8B b\x8d\xadsj\xd6\x1e\xb5\xc0n\xb6\x89ܘ\bh\x0e\x05\x1c\xa5\xdf%\xa4TCq\aߌ=\x1a{<\rMwx\x7f\xd9!\xedL\x89\x17\xc1!\xb7裷\xa1\"\xf7!W\x9a\x02|\x0e.bm\x17'ʈ\x05_9\xbd\xc7S_\xd1p˸\xb9\x14\xba\xcdr/{\x95A\xa5y\x11\xc4\xe2\x06-\xea^\xb5P\xc6@\x06\xa0\xb6\xc7j\xf4\x18\x93\x800\xdc\x11\xfesl\xbc\x9b\x99\x03ڃ\xc4\xe3\xech\xec^\xeamE\xe6\xa9r\xbc\xcdb33\xfb!\xfe\x19\xb9\xef\xe5\xe9\xe1i\x0e\v!\xc0\xf8\x1dZ\xb2\xf1&\xa8▭\xaa\xea]L\xde\xef H\xf1\x87\xefQ\xa2iR\x98ݡ\xc8U\f\x95\x13U\x87\x91'\xd2\xdb*\x99\xd0X\xa0\xfcK\x9eQg\xd3'`\x1a\xf2ڡ\xb2\xb6=\b\xc5(\xdd\f\xc1\xef\x1e\xfb\xc8\xfb\x8d\x98\x04\xf8Z]\xecTլ\xa9\xd2n\xe6M-\xf9\xa4+m\xac\xbdo\x84o\xa9\xf5\xa5\x16\x92Smx\x1dv\xa5\a\x12W-\xc1\x80\x1a\xbaM\xc2\x18\xd8\f\xab)\x89\x9bS\xed\r\x8e\x9f\xda{/\x9dcB\xbe\x9c>\x1dz*\xdb\x1ch\xa4\xf4\xcal_\xcf\x11o\xb8њ\x02\xdd\x1b`g\x14\xfd\xd1u\xd3\xc7\x1b\xc1g\x1d\xf8\x1e\a\x14\xdf\x13\xe5c\xdcXt\x9c\x8e\x11/\xc1a\xc4\xf5[l\xc0\xed\x88\xe0l\x89\xf6\x1e^\x96\v\xdax\xce\xc0\f\x96\vX\a-\x14\x16\x8e\x8e;\xd4Դ\xc8\xcdi\xf8.\x1a/\x8f\xab\xa2\xd5X\xbc䶣\xe8vX\x86\x94\x1e\xe6\xb0>\r\x94\x1bw\b\xd9X\xdcȯw\b\xf9\x1c7\x16\x857\xcc\xef@j'\x05\x02\x1bP\x7f\xaa\x03G\x04=\x97\x16O\x19s\xbe\xc3<\xdf\u0086\xc4\xce[\xe0\xa1\xe8\xf8F\xfc<\xe7mg-\x94\xef\x9c<\xae\xcḇ8\x1e\x94\xe8p~\xd3\xf8S*\xde\xf8@\x16\xbeb\xe6\xb5\x7f\xe2\x1bE`yY\x19\nf*9\x8c\xb5\xe8\x1a\xa3\x05\xb5l\xf7\x95\x80\x17\x96\xffw\x85\xe0\xb0Y\xabk\x94\xeb\xac\x15+\xdc\xd5\x05\xc5W\xa47\xf7A\xe9m\xad\xdde\x98\xb5\xa3\xfe\xf4\xd2\nud\xfcE:\xa0\xc1\x8a\xa6\xd5\x16Qg\xae!\xe8X\x18ƒa:\x99\f\x1cy\xa0&\x9cR\x98\x98\x93p6\x9e\xd4\xe6H\xa7[\xe4\"\x050:%|\xea\r\x99\x16\xb9+\xa7\xa5\x01\xcaG\xa9\x14\x15\x01\x16kCڢ\xb2֢:\x01s\xe4M\x87\xdfL\xdf\xff\xffZ.Ŝ\xa7\x0e\n\xc5\x17<\xc8\xfe\xd3\xd4}\xfa~\xecQ)\xf0p\x0e\x1a\xfa\xf8\xa9t\xeb3\x9b\xb7\xfd\x04\x1b\xa9\xa8\x98la\xc7\x1d\xe5\xc1\xc0\xc3\xea\xc7\xd5\xe3\x8f.\xf6\x10\xa8\xbd\x83#Y\xd0E\x96\xa8i0\xf9\x85$8OY\xe4\xb6\x03\x14{&/\x00e\xf4\x96*\xcf\xf4\\B%^\xf2'cA\xa0\xa7l\xa5\xb7\xc0wLo)6\x86@?r\x9c\xd9o3J\xee3\xea!R\x8f\xb8\xc7]\x16}\x91C=\xc1[\xac9\xfe\x8e}\xe6?\x9b\xf6\xf2\\\xdaQ\xfc\x18\xd8\x16St\x17K2'EW\xfe\xf2\xb6}\x19\xdf\xdf`\xf7\x1fοW=\xff\xd5S\x7f\xef\x89\xffW\xa1\x9c\x9a*ݛ\xe5\xf3\xe7\xb4+=x\xe6#\xc0\xd6&\xf8\x81\xec\xdfr\xf8\xc1\xa0\x8e\xbff\xbc\x85\xc7\xf8\x1bͭ\x02\x85\xf6\x14\x8b\xf0`m|\x14-\x8fu\x11*\x86\xf2\xd2\xfd\x10\xbc\xe8\xfc\x94\xd4^\xeb\xff\xd0t\x87\\\x83y\xba7\x99rmˮY\xc9홰>?u\xcf\xe1\x9f\xff\x9e\xfc'\x00\x00\xff\xff\xf3/:\xb2\x01\x1d\x00\x00"), 
[]byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xbcVMs\xdb6\x10\xbd\xebW\xecL\xaf%\x15O{\xe8\xf0\xd689x\xdaf4v&w\bX\x89\x88A\x00\xdd]\xc8u?\xfe{\a\x00)K\x14\xe5$\x97\xf0&`\xb1\xfb\xf0\xde\ue0da\xa6Y\xa9h?!\xb1\r\xbe\x03\x15-\xfe%\xe8\xf3/n\x1f\x7f\xe1ֆ\xf5\xe1f\xf5h\xbd\xe9\xe06\xb1\x84\xe1\x1e9$\xd2\xf8\x0ew\xd6[\xb1\xc1\xaf\x06\x14e\x94\xa8n\x05\xa0\xbc\x0f\xa2\xf22\xe7\x9f\x00:x\xa1\xe0\x1cR\xb3G\xdf>\xa6-n\x93u\x06\xa9$\x9fJ\x1f\u07b47?\xb7oV\x00^\r\u0601A\x87\x82[\xa5\x1fS$\xfc3!\v\xb7\atH\xa1\xb5a\xc5\x11uο\xa7\x90b\a/\x1b\xf5\xfcX\xbb\xe2~WR\xbd-\xa9\xeek\xaa\xb2\xeb,\xcbo\xd7\"~\xb7cTt\x89\x94[\x06T\x02\xd8\xfa}r\x8a\x16CV\x00\xacC\xc4\x0e>dXQi4+\x80\xf1\xda\x05f\x03ʘB\xa4r\x1b\xb2^\x90n\x83K\xc3D`\x03\x06Y\x93\x8dR\x88\xfa\xd8c\xb9\"\x84\x1dH\x8fPˁ\x04\xd8\xe2\x88\xc0\x94s\x00\x9f9\xf8\x8d\x92\xbe\x836\xf3\xd5\xd6\xd0\fd\f\xa8T\xbf\x9d/\xcbs\x06\xccB\xd6\xef\xafA`Q\x92x\x02Q\xea\xda\xe0\x81N\xf8=\aP\xe2\xdb\xd8+>\xaf\xfeP6\xaeU\xae1\x87\x9bʴ\xeeqP\xdd\x18\x1b\"\xfa_7w\x9f~z8[\x86s\xac\v҂eP\x13\xd2L\\e\r\x82G\b\x04C\xa0\x89Un\x8fI#\x85\x88$vj\xad\xfa\x9d\f\xcf\xc9\xea\f¿\xcd\xd9\x1e@F]O\x81\xc9S\x84\\H\x1c\x9b\x02\xcdx\xd1J\xaee \x8c\x84\x8c\xbe\xceU^V\x1e\xc2\xf63jig\xa9\x1f\x90r\x1a\xe0>$g\xf2\xf0\x1d\x90\x04\bu\xd8{\xfb\xf717\xe7{\xe7\xa2NI\xa1$\xb7\x9dW\x0e\x0e\xca%\xfc\x11\x947\xb3̃z\x06\xc2\\\x13\x92?\xc9W\x0e\xf0\x1c\xc7\x1f\x99D\xebw\xa1\x83^$r\xb7^\xef\xadL\x96\xa2\xc30$o\xe5y]\xdc\xc1n\x93\x04\xe2\xb5\xc1\x03\xba5\xdb}\xa3H\xf7VPK\"\\\xabh\x9br\x11_l\xa5\x1d\xcc\x0f4\x9a\x10\x9f\x95\xbd\xe8\x9e\xfa\x15\x17\xf8\x06y\xb2'\xd4\x1e\xa9\xa9\xea\x15_T\xc8K\x99\xba\xfb\xf7\x0f\x1faBR\x95\xaa\xa2\xbc\x84^\xf02\xe9\x93ٴ~\x87T\xcf\xed(\f%'z\x13\x83\xf5R~hg\xd1\vp\xda\x0eVx\xea\xd8,\xdd<\xedm\xb1\xdd\xec\x00)\x1a%h\xe6\x01w\x1enՀ\xeeV1~g\xad\xb2*\xdcd\x11\xbeJ\xad\xd3\xc7d\x1e\\\xe9=٘\x9e\x81+\xd2.\f\xffCD\x9d\xc5\xcd\xfc\xe6\xd3vgu\x1d\xab] x\xea\xad\xee\xa7\xe1\x9f\xd1t4\x8as\xfe\x96\x8d!\x7f/v;߹zy(\"[\xc2Y\xc36p\xe1ݯ\xf3RL\xf5\x1b\x99\xa9\x8e>r\xa3\x13Qi\xbe\xa3ϫ\xa5C_\xcb\x05\x12\x05\xbaX\x9d\x81z_\x82\xca?\x06e=\x83\xf2\xcf\xe3A\x90^\t namespace: velero data: - configs: | + : | { - "repo-type-1": { - "cacheLimitMB": 2048, - "enableCompression": true - }, - "repo-type-2": { - "cacheLimitMB": 1024, - "enableCompression": false - } - } + "cacheLimitMB": 2048, + "enableCompression": true + } + : | + { + "cacheLimitMB": 1, + "enableCompression": false + } ``` To create the configMap, users need to save something like the above sample to a file and then run below commands: ``` -kubectl create cm -n velero --from-file= -``` -Or -``` kubectl apply -f ``` diff --git a/pkg/apis/velero/v1/backup_repository_types.go b/pkg/apis/velero/v1/backup_repository_types.go index 6a062c4fe..af02c123e 100644 --- a/pkg/apis/velero/v1/backup_repository_types.go +++ b/pkg/apis/velero/v1/backup_repository_types.go @@ -41,6 +41,11 @@ type BackupRepositorySpec struct { // MaintenanceFrequency is how often maintenance should be run. MaintenanceFrequency metav1.Duration `json:"maintenanceFrequency"` + + // RepositoryConfig is for repository-specific configuration fields. + // +optional + // +nullable + RepositoryConfig map[string]string `json:"repositoryConfig,omitempty"` } // BackupRepositoryPhase represents the lifecycle phase of a BackupRepository. 
diff --git a/pkg/apis/velero/v1/zz_generated.deepcopy.go b/pkg/apis/velero/v1/zz_generated.deepcopy.go
index 522e15105..03f98b425 100644
--- a/pkg/apis/velero/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/velero/v1/zz_generated.deepcopy.go
@@ -111,7 +111,7 @@ func (in *BackupRepository) DeepCopyInto(out *BackupRepository) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	out.Spec = in.Spec
+	in.Spec.DeepCopyInto(&out.Spec)
 	in.Status.DeepCopyInto(&out.Status)
 }
 
@@ -169,6 +169,13 @@ func (in *BackupRepositoryList) DeepCopyObject() runtime.Object {
 func (in *BackupRepositorySpec) DeepCopyInto(out *BackupRepositorySpec) {
 	*out = *in
 	out.MaintenanceFrequency = in.MaintenanceFrequency
+	if in.RepositoryConfig != nil {
+		in, out := &in.RepositoryConfig, &out.RepositoryConfig
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositorySpec.
diff --git a/pkg/builder/data_download_builder.go b/pkg/builder/data_download_builder.go
index 51dd90e06..e0ed2ba6d 100644
--- a/pkg/builder/data_download_builder.go
+++ b/pkg/builder/data_download_builder.go
@@ -19,6 +19,7 @@ package builder
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
+	"github.com/vmware-tanzu/velero/pkg/apis/velero/shared"
 	velerov2alpha1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1"
 )
 
@@ -122,3 +123,21 @@ func (d *DataDownloadBuilder) StartTimestamp(startTime *metav1.Time) *DataDownlo
 	d.object.Status.StartTimestamp = startTime
 	return d
 }
+
+// CompletionTimestamp sets the DataDownload's CompletionTimestamp.
+func (d *DataDownloadBuilder) CompletionTimestamp(completionTimestamp *metav1.Time) *DataDownloadBuilder {
+	d.object.Status.CompletionTimestamp = completionTimestamp
+	return d
+}
+
+// Progress sets the DataDownload's Progress.
+func (d *DataDownloadBuilder) Progress(progress shared.DataMoveOperationProgress) *DataDownloadBuilder {
+	d.object.Status.Progress = progress
+	return d
+}
+
+// Node sets the DataDownload's Node.
+func (d *DataDownloadBuilder) Node(node string) *DataDownloadBuilder {
+	d.object.Status.Node = node
+	return d
+}
diff --git a/pkg/builder/data_upload_builder.go b/pkg/builder/data_upload_builder.go
index 7ff33dcb0..465f6b94e 100644
--- a/pkg/builder/data_upload_builder.go
+++ b/pkg/builder/data_upload_builder.go
@@ -133,7 +133,14 @@ func (d *DataUploadBuilder) Labels(labels map[string]string) *DataUploadBuilder
 	return d
 }
 
+// Progress sets the DataUpload's Progress.
 func (d *DataUploadBuilder) Progress(progress shared.DataMoveOperationProgress) *DataUploadBuilder {
 	d.object.Status.Progress = progress
 	return d
 }
+
+// Node sets the DataUpload's Node.
+func (d *DataUploadBuilder) Node(node string) *DataUploadBuilder {
+	d.object.Status.Node = node
+	return d
+}
diff --git a/pkg/cmd/cli/install/install.go b/pkg/cmd/cli/install/install.go
index c51cee658..b73438e78 100644
--- a/pkg/cmd/cli/install/install.go
+++ b/pkg/cmd/cli/install/install.go
@@ -364,8 +364,10 @@ func (o *Options) Validate(c *cobra.Command, args []string, f client.Factory) er
 		return err
 	}
 
-	if err := uploader.ValidateUploaderType(o.UploaderType); err != nil {
+	if msg, err := uploader.ValidateUploaderType(o.UploaderType); err != nil {
 		return err
+	} else if msg != "" {
+		fmt.Printf("⚠️ %s\n", msg)
 	}
 
 	// If we're only installing CRDs, we can skip the rest of the validation.
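Both call sites above rely on the changed contract of `uploader.ValidateUploaderType`, whose body this diff never shows: it now returns a warning message alongside the validation error. A hedged sketch of the assumed behavior — only the `(string, error)` shape, the `ResticType`/`KopiaType` constants, and the restic-deprecation warning (issue #8072) are taken from the surrounding changes; the message text is illustrative:

```go
package uploader

import "fmt"

const (
	ResticType = "restic"
	KopiaType  = "kopia"
)

// ValidateUploaderType checks the uploader type and, for a valid but
// deprecated type, returns a non-empty warning message with a nil error.
func ValidateUploaderType(t string) (string, error) {
	switch t {
	case KopiaType:
		return "", nil
	case ResticType:
		// Restic still validates; callers surface this deprecation warning.
		return "restic is deprecated, please use kopia instead", nil
	default:
		return "", fmt.Errorf("invalid uploader type %q, valid types are: %s, %s", t, KopiaType, ResticType)
	}
}
```

Under this contract, the install command prints the warning to the user while the server logs it, but neither treats a deprecated type as a hard failure.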
diff --git a/pkg/cmd/cli/nodeagent/server.go b/pkg/cmd/cli/nodeagent/server.go index e19e1ab7f..181afbf69 100644 --- a/pkg/cmd/cli/nodeagent/server.go +++ b/pkg/cmd/cli/nodeagent/server.go @@ -293,17 +293,29 @@ func (s *nodeAgentServer) run() { loadAffinity = s.dataPathConfigs.LoadAffinity[0] } dataUploadReconciler := controller.NewDataUploadReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.csiSnapshotClient.SnapshotV1(), s.dataPathMgr, loadAffinity, repoEnsurer, clock.RealClock{}, credentialGetter, s.nodeName, s.fileSystem, s.config.dataMoverPrepareTimeout, s.logger, s.metrics) - s.attemptDataUploadResume(dataUploadReconciler) if err = dataUploadReconciler.SetupWithManager(s.mgr); err != nil { s.logger.WithError(err).Fatal("Unable to create the data upload controller") } dataDownloadReconciler := controller.NewDataDownloadReconciler(s.mgr.GetClient(), s.mgr, s.kubeClient, s.dataPathMgr, repoEnsurer, credentialGetter, s.nodeName, s.config.dataMoverPrepareTimeout, s.logger, s.metrics) - s.attemptDataDownloadResume(dataDownloadReconciler) if err = dataDownloadReconciler.SetupWithManager(s.mgr); err != nil { s.logger.WithError(err).Fatal("Unable to create the data download controller") } + go func() { + s.mgr.GetCache().WaitForCacheSync(s.ctx) + + if err := dataUploadReconciler.AttemptDataUploadResume(s.ctx, s.mgr.GetClient(), s.logger.WithField("node", s.nodeName), s.namespace); err != nil { + s.logger.WithError(errors.WithStack(err)).Error("failed to attempt data upload resume") + } + + if err := dataDownloadReconciler.AttemptDataDownloadResume(s.ctx, s.mgr.GetClient(), s.logger.WithField("node", s.nodeName), s.namespace); err != nil { + s.logger.WithError(errors.WithStack(err)).Error("failed to attempt data download resume") + } + + s.logger.Info("Attempt complete to resume dataUploads and dataDownloads") + }() + s.logger.Info("Controllers starting...") if err := s.mgr.Start(ctrl.SetupSignalHandler()); err != nil { @@ -373,31 +385,6 @@ func (s *nodeAgentServer) markInProgressCRsFailed() { s.markInProgressPVRsFailed(client) } -func (s *nodeAgentServer) attemptDataUploadResume(r *controller.DataUploadReconciler) { - // the function is called before starting the controller manager, the embedded client isn't ready to use, so create a new one here - client, err := ctrlclient.New(s.mgr.GetConfig(), ctrlclient.Options{Scheme: s.mgr.GetScheme()}) - if err != nil { - s.logger.WithError(errors.WithStack(err)).Error("failed to create client") - return - } - if err := r.AttemptDataUploadResume(s.ctx, client, s.logger.WithField("node", s.nodeName), s.namespace); err != nil { - s.logger.WithError(errors.WithStack(err)).Error("failed to attempt data upload resume") - } -} - -func (s *nodeAgentServer) attemptDataDownloadResume(r *controller.DataDownloadReconciler) { - // the function is called before starting the controller manager, the embedded client isn't ready to use, so create a new one here - client, err := ctrlclient.New(s.mgr.GetConfig(), ctrlclient.Options{Scheme: s.mgr.GetScheme()}) - if err != nil { - s.logger.WithError(errors.WithStack(err)).Error("failed to create client") - return - } - - if err := r.AttemptDataDownloadResume(s.ctx, client, s.logger.WithField("node", s.nodeName), s.namespace); err != nil { - s.logger.WithError(errors.WithStack(err)).Error("failed to attempt data download resume") - } -} - func (s *nodeAgentServer) markInProgressPVBsFailed(client ctrlclient.Client) { pvbs := &velerov1api.PodVolumeBackupList{} if err := client.List(s.ctx, pvbs, 
&ctrlclient.ListOptions{Namespace: s.namespace}); err != nil { diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index d8938ca56..3a2b07024 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -140,6 +140,7 @@ type serverConfig struct { disableInformerCache bool scheduleSkipImmediately bool maintenanceCfg repository.MaintenanceConfig + backukpRepoConfig string } func NewCommand(f client.Factory) *cobra.Command { @@ -253,6 +254,8 @@ func NewCommand(f client.Factory) *cobra.Command { command.Flags().StringVar(&config.maintenanceCfg.CPULimit, "maintenance-job-cpu-limit", config.maintenanceCfg.CPULimit, "CPU limit for maintenance job. Default is no limit.") command.Flags().StringVar(&config.maintenanceCfg.MemLimit, "maintenance-job-mem-limit", config.maintenanceCfg.MemLimit, "Memory limit for maintenance job. Default is no limit.") + command.Flags().StringVar(&config.backukpRepoConfig, "backup-repository-config", config.backukpRepoConfig, "The name of configMap containing backup repository configurations.") + // maintenance job log setting inherited from velero server config.maintenanceCfg.FormatFlag = config.formatFlag config.maintenanceCfg.LogLevelFlag = logLevelFlag @@ -288,8 +291,10 @@ type server struct { } func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*server, error) { - if err := uploader.ValidateUploaderType(config.uploaderType); err != nil { + if msg, err := uploader.ValidateUploaderType(config.uploaderType); err != nil { return nil, err + } else if msg != "" { + logger.Warn(msg) } if config.clientQPS < 0.0 { @@ -876,7 +881,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string } if _, ok := enabledRuntimeControllers[controller.BackupRepo]; ok { - if err := controller.NewBackupRepoReconciler(s.namespace, s.logger, s.mgr.GetClient(), s.config.repoMaintenanceFrequency, s.repoManager).SetupWithManager(s.mgr); err != nil { + if err := controller.NewBackupRepoReconciler(s.namespace, s.logger, s.mgr.GetClient(), s.config.repoMaintenanceFrequency, s.config.backukpRepoConfig, s.repoManager).SetupWithManager(s.mgr); err != nil { s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupRepo) } } @@ -1148,9 +1153,15 @@ func markDataUploadsCancel(ctx context.Context, client ctrlclient.Client, backup du.Status.Phase == velerov2alpha1api.DataUploadPhaseNew || du.Status.Phase == "" { err := controller.UpdateDataUploadWithRetry(ctx, client, types.NamespacedName{Namespace: du.Namespace, Name: du.Name}, log.WithField("dataupload", du.Name), - func(dataUpload *velerov2alpha1api.DataUpload) { + func(dataUpload *velerov2alpha1api.DataUpload) bool { + if dataUpload.Spec.Cancel { + return false + } + dataUpload.Spec.Cancel = true - dataUpload.Status.Message = fmt.Sprintf("found a dataupload with status %q during the velero server starting, mark it as cancel", du.Status.Phase) + dataUpload.Status.Message = fmt.Sprintf("Dataupload is in status %q during the velero server starting, mark it as cancel", du.Status.Phase) + + return true }) if err != nil { @@ -1183,9 +1194,15 @@ func markDataDownloadsCancel(ctx context.Context, client ctrlclient.Client, rest dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseNew || dd.Status.Phase == "" { err := controller.UpdateDataDownloadWithRetry(ctx, client, types.NamespacedName{Namespace: dd.Namespace, Name: dd.Name}, log.WithField("datadownload", dd.Name), - func(dataDownload *velerov2alpha1api.DataDownload) { + func(dataDownload 
+					if dataDownload.Spec.Cancel {
+						return false
+					}
+
 					dataDownload.Spec.Cancel = true
-					dataDownload.Status.Message = fmt.Sprintf("found a datadownload with status %q during the velero server starting, mark it as cancel", dd.Status.Phase)
+					dataDownload.Status.Message = fmt.Sprintf("Datadownload is in status %q during the velero server starting, mark it as cancel", dd.Status.Phase)
+
+					return true
 				})
 
 			if err != nil {
diff --git a/pkg/cmd/server/server_test.go b/pkg/cmd/server/server_test.go
index 4d222b776..6e5995586 100644
--- a/pkg/cmd/server/server_test.go
+++ b/pkg/cmd/server/server_test.go
@@ -203,6 +203,13 @@ func Test_newServer(t *testing.T) {
 	}, logger)
 	assert.Error(t, err)
 
+	// invalid clientQPS with the restic uploader
+	_, err = newServer(factory, serverConfig{
+		uploaderType: uploader.ResticType,
+		clientQPS:    -1,
+	}, logger)
+	assert.Error(t, err)
+
 	// invalid clientBurst
 	factory.On("SetClientQPS", mock.Anything).Return()
 	_, err = newServer(factory, serverConfig{
diff --git a/pkg/controller/backup_repository_controller.go b/pkg/controller/backup_repository_controller.go
index 7e298d48d..0bc457a17 100644
--- a/pkg/controller/backup_repository_controller.go
+++ b/pkg/controller/backup_repository_controller.go
@@ -19,6 +19,8 @@ package controller
 import (
 	"bytes"
 	"context"
+	"encoding/json"
+	"fmt"
 	"reflect"
 	"time"
 
@@ -38,6 +40,8 @@ import (
 	"github.com/vmware-tanzu/velero/pkg/repository"
 	repoconfig "github.com/vmware-tanzu/velero/pkg/repository/config"
 	"github.com/vmware-tanzu/velero/pkg/util/kube"
+
+	corev1api "k8s.io/api/core/v1"
 )
 
 const (
@@ -51,17 +55,19 @@ type BackupRepoReconciler struct {
 	logger               logrus.FieldLogger
 	clock                clocks.WithTickerAndDelayedExecution
 	maintenanceFrequency time.Duration
+	backupRepoConfig     string
 	repositoryManager    repository.Manager
 }
 
 func NewBackupRepoReconciler(namespace string, logger logrus.FieldLogger, client client.Client,
-	maintenanceFrequency time.Duration, repositoryManager repository.Manager) *BackupRepoReconciler {
+	maintenanceFrequency time.Duration, backupRepoConfig string, repositoryManager repository.Manager) *BackupRepoReconciler {
 	c := &BackupRepoReconciler{
 		client,
 		namespace,
 		logger,
 		clocks.RealClock{},
 		maintenanceFrequency,
+		backupRepoConfig,
 		repositoryManager,
 	}
 
@@ -223,7 +229,7 @@ func (r *BackupRepoReconciler) getIdentiferByBSL(ctx context.Context, req *veler
 }
 
 func (r *BackupRepoReconciler) initializeRepo(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error {
-	log.Info("Initializing backup repository")
+	log.WithField("repoConfig", r.backupRepoConfig).Info("Initializing backup repository")
 
 	// confirm the repo's BackupStorageLocation is valid
 	repoIdentifier, err := r.getIdentiferByBSL(ctx, req)
@@ -238,6 +244,13 @@ func (r *BackupRepoReconciler) initializeRepo(ctx context.Context, req *velerov1
 		})
 	}
 
+	config, err := getBackupRepositoryConfig(ctx, r, r.backupRepoConfig, r.namespace, req.Name, req.Spec.RepositoryType, log)
+	if err != nil {
+		log.WithError(err).Warn("Failed to get repo config, repo config is ignored")
+	} else if config != nil {
+		log.Infof("Init repo with config %v", config)
+	}
+
 	// defaulting - if the patch fails, return an error so the item is returned to the queue
 	if err := r.patchBackupRepository(ctx, req, func(rr *velerov1api.BackupRepository) {
 		rr.Spec.ResticIdentifier = repoIdentifier
@@ -245,6 +258,8 @@ func (r *BackupRepoReconciler) initializeRepo(ctx context.Context, req *velerov1
 		if rr.Spec.MaintenanceFrequency.Duration <= 0 {
 			rr.Spec.MaintenanceFrequency = metav1.Duration{Duration: r.getRepositoryMaintenanceFrequency(req)}
 		}
+
+		rr.Spec.RepositoryConfig = config
 	}); err != nil {
 		return err
 	}
@@ -366,3 +381,35 @@ func (r *BackupRepoReconciler) patchBackupRepository(ctx context.Context, req *v
 	}
 	return nil
 }
+
+func getBackupRepositoryConfig(ctx context.Context, ctrlClient client.Client, configName, namespace, repoName, repoType string, log logrus.FieldLogger) (map[string]string, error) {
+	if configName == "" {
+		return nil, nil
+	}
+
+	loc := &corev1api.ConfigMap{}
+	if err := ctrlClient.Get(ctx, client.ObjectKey{
+		Namespace: namespace,
+		Name:      configName,
+	}, loc); err != nil {
+		return nil, errors.Wrapf(err, "error getting configMap %s", configName)
+	}
+
+	jsonData, found := loc.Data[repoType]
+	if !found {
+		log.Infof("No data for repo type %s in config map %s", repoType, configName)
+		return nil, nil
+	}
+
+	var unmarshalled map[string]interface{}
+	if err := json.Unmarshal([]byte(jsonData), &unmarshalled); err != nil {
+		return nil, errors.Wrapf(err, "error unmarshalling config data from %s for repo %s, repo type %s", configName, repoName, repoType)
+	}
+
+	result := map[string]string{}
+	for k, v := range unmarshalled {
+		result[k] = fmt.Sprintf("%v", v)
+	}
+
+	return result, nil
+}
diff --git a/pkg/controller/backup_repository_controller_test.go b/pkg/controller/backup_repository_controller_test.go
index 92ef5e335..873d2ce18 100644
--- a/pkg/controller/backup_repository_controller_test.go
+++ b/pkg/controller/backup_repository_controller_test.go
@@ -21,7 +21,9 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/mock"
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	ctrl "sigs.k8s.io/controller-runtime"
 
@@ -29,6 +31,8 @@ import (
 	"github.com/vmware-tanzu/velero/pkg/repository"
 	repomokes "github.com/vmware-tanzu/velero/pkg/repository/mocks"
 	velerotest "github.com/vmware-tanzu/velero/pkg/test"
+
+	clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
 
 const testMaintenanceFrequency = 10 * time.Minute
@@ -43,6 +47,7 @@ func mockBackupRepoReconciler(t *testing.T, mockOn string, arg interface{}, ret
 		velerotest.NewLogger(),
 		velerotest.NewFakeControllerRuntimeClient(t),
 		testMaintenanceFrequency,
+		"fake-repo-config",
 		mgr,
 	)
 }
@@ -243,6 +248,7 @@ func TestGetRepositoryMaintenanceFrequency(t *testing.T) {
 			velerotest.NewLogger(),
 			velerotest.NewFakeControllerRuntimeClient(t),
 			test.userDefinedFreq,
+			"",
 			&mgr,
 		)
@@ -370,10 +376,112 @@ func TestNeedInvalidBackupRepo(t *testing.T) {
 				velerov1api.DefaultNamespace,
 				velerotest.NewLogger(),
 				velerotest.NewFakeControllerRuntimeClient(t),
-				time.Duration(0), nil)
+				time.Duration(0), "", nil)
 
 			need := reconciler.needInvalidBackupRepo(test.oldBSL, test.newBSL)
 			assert.Equal(t, test.expect, need)
 		})
 	}
 }
+
+func TestGetBackupRepositoryConfig(t *testing.T) {
+	configWithNoData := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "config-1",
+			Namespace: velerov1api.DefaultNamespace,
+		},
+	}
+
+	configWithWrongData := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "config-1",
+			Namespace: velerov1api.DefaultNamespace,
+		},
+		Data: map[string]string{
+			"fake-repo-type": "",
+		},
+	}
+
+	configWithData := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "config-1",
+			Namespace: velerov1api.DefaultNamespace,
+		},
+		Data: map[string]string{
+			"fake-repo-type": "{\"cacheLimitMB\": 1000, \"enableCompression\": true}",
"fake-repo-type-1": "{\"cacheLimitMB\": 1, \"enableCompression\": false}", + }, + } + + tests := []struct { + name string + congiName string + repoName string + repoType string + kubeClientObj []runtime.Object + expectedErr string + expectedResult map[string]string + }{ + { + name: "empty configName", + }, + { + name: "get error", + congiName: "config-1", + expectedErr: "error getting configMap config-1: configmaps \"config-1\" not found", + }, + { + name: "no config for repo", + congiName: "config-1", + repoName: "fake-repo", + repoType: "fake-repo-type", + kubeClientObj: []runtime.Object{ + configWithNoData, + }, + }, + { + name: "unmarshall error", + congiName: "config-1", + repoName: "fake-repo", + repoType: "fake-repo-type", + kubeClientObj: []runtime.Object{ + configWithWrongData, + }, + expectedErr: "error unmarshalling config data from config-1 for repo fake-repo, repo type fake-repo-type: unexpected end of JSON input", + }, + { + name: "succeed", + congiName: "config-1", + repoName: "fake-repo", + repoType: "fake-repo-type", + kubeClientObj: []runtime.Object{ + configWithData, + }, + expectedResult: map[string]string{ + "cacheLimitMB": "1000", + "enableCompression": "true", + }, + }, + } + + scheme := runtime.NewScheme() + corev1.AddToScheme(scheme) + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeClientBuilder := clientFake.NewClientBuilder() + fakeClientBuilder = fakeClientBuilder.WithScheme(scheme) + + fakeClient := fakeClientBuilder.WithRuntimeObjects(test.kubeClientObj...).Build() + + result, err := getBackupRepositoryConfig(context.Background(), fakeClient, test.congiName, velerov1api.DefaultNamespace, test.repoName, test.repoType, velerotest.NewLogger()) + + if test.expectedErr != "" { + assert.EqualError(t, err, test.expectedErr) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expectedResult, result) + } + }) + } +} diff --git a/pkg/controller/data_download_controller.go b/pkg/controller/data_download_controller.go index 12365e03c..a161f60bd 100644 --- a/pkg/controller/data_download_controller.go +++ b/pkg/controller/data_download_controller.go @@ -140,9 +140,17 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request } else if controllerutil.ContainsFinalizer(dd, DataUploadDownloadFinalizer) && !dd.Spec.Cancel && !isDataDownloadInFinalState(dd) { // when delete cr we need to clear up internal resources created by Velero, here we use the cancel mechanism // to help clear up resources instead of clear them directly in case of some conflict with Expose action - if err := UpdateDataDownloadWithRetry(ctx, r.client, req.NamespacedName, log, func(dataDownload *velerov2alpha1api.DataDownload) { + log.Warnf("Cancel dd under phase %s because it is being deleted", dd.Status.Phase) + + if err := UpdateDataDownloadWithRetry(ctx, r.client, req.NamespacedName, log, func(dataDownload *velerov2alpha1api.DataDownload) bool { + if dataDownload.Spec.Cancel { + return false + } + dataDownload.Spec.Cancel = true - dataDownload.Status.Message = fmt.Sprintf("found a datadownload %s/%s is being deleted, mark it as cancel", dd.Namespace, dd.Name) + dataDownload.Status.Message = "Cancel datadownload because it is being deleted" + + return true }); err != nil { log.Errorf("failed to set cancel flag with error %s for %s/%s", err.Error(), dd.Namespace, dd.Name) return ctrl.Result{}, err @@ -563,9 +571,15 @@ func (r *DataDownloadReconciler) findSnapshotRestoreForPod(ctx context.Context, } } else if unrecoverable, reason := 
kube.IsPodUnrecoverable(pod, log); unrecoverable { err := UpdateDataDownloadWithRetry(context.Background(), r.client, types.NamespacedName{Namespace: dd.Namespace, Name: dd.Name}, r.logger.WithField("datadownlad", dd.Name), - func(dataDownload *velerov2alpha1api.DataDownload) { + func(dataDownload *velerov2alpha1api.DataDownload) bool { + if dataDownload.Spec.Cancel { + return false + } + dataDownload.Spec.Cancel = true - dataDownload.Status.Message = fmt.Sprintf("datadownload mark as cancel to failed early for exposing pod %s/%s is in abnormal status for %s", pod.Namespace, pod.Name, reason) + dataDownload.Status.Message = fmt.Sprintf("Cancel datadownload because the exposing pod %s/%s is in abnormal status for reason %s", pod.Namespace, pod.Name, reason) + + return true }) if err != nil { @@ -586,75 +600,6 @@ func (r *DataDownloadReconciler) findSnapshotRestoreForPod(ctx context.Context, return []reconcile.Request{request} } -func (r *DataDownloadReconciler) FindDataDownloads(ctx context.Context, cli client.Client, ns string) ([]*velerov2alpha1api.DataDownload, error) { - pods := &v1.PodList{} - var dataDownloads []*velerov2alpha1api.DataDownload - if err := cli.List(ctx, pods, &client.ListOptions{Namespace: ns}); err != nil { - r.logger.WithError(errors.WithStack(err)).Error("failed to list pods on current node") - return nil, errors.Wrapf(err, "failed to list pods on current node") - } - - for _, pod := range pods.Items { - if pod.Spec.NodeName != r.nodeName { - r.logger.Debugf("Pod %s related data download will not handled by %s nodes", pod.GetName(), r.nodeName) - continue - } - dd, err := findDataDownloadByPod(cli, pod) - if err != nil { - r.logger.WithError(errors.WithStack(err)).Error("failed to get dataDownload by pod") - continue - } else if dd != nil { - dataDownloads = append(dataDownloads, dd) - } - } - return dataDownloads, nil -} - -func (r *DataDownloadReconciler) findAcceptDataDownloadsByNodeLabel(ctx context.Context, cli client.Client, ns string) ([]velerov2alpha1api.DataDownload, error) { - dataDownloads := &velerov2alpha1api.DataDownloadList{} - if err := cli.List(ctx, dataDownloads, &client.ListOptions{Namespace: ns}); err != nil { - r.logger.WithError(errors.WithStack(err)).Error("failed to list datauploads") - return nil, errors.Wrapf(err, "failed to list datauploads") - } - - var result []velerov2alpha1api.DataDownload - for _, dd := range dataDownloads.Items { - if dd.Status.Phase != velerov2alpha1api.DataDownloadPhaseAccepted { - continue - } - if dd.Labels[acceptNodeLabelKey] == r.nodeName { - result = append(result, dd) - } - } - return result, nil -} - -// CancelAcceptedDataDownload will cancel the accepted data download -func (r *DataDownloadReconciler) CancelAcceptedDataDownload(ctx context.Context, cli client.Client, ns string) { - r.logger.Infof("Canceling accepted data for node %s", r.nodeName) - dataDownloads, err := r.findAcceptDataDownloadsByNodeLabel(ctx, cli, ns) - if err != nil { - r.logger.WithError(err).Error("failed to find data downloads") - return - } - - for _, dd := range dataDownloads { - if dd.Spec.Cancel { - continue - } - err = UpdateDataDownloadWithRetry(ctx, cli, types.NamespacedName{Namespace: dd.Namespace, Name: dd.Name}, - r.logger.WithField("dataupload", dd.Name), func(dataDownload *velerov2alpha1api.DataDownload) { - dataDownload.Spec.Cancel = true - dataDownload.Status.Message = fmt.Sprintf("found a datadownload with status %q during the node-agent starting, mark it as cancel", dd.Status.Phase) - }) - - 
r.logger.Warn(dd.Status.Message) - if err != nil { - r.logger.WithError(err).Errorf("failed to set cancel flag with error %s", err.Error()) - } - } -} - func (r *DataDownloadReconciler) prepareDataDownload(ssb *velerov2alpha1api.DataDownload) { ssb.Status.Phase = velerov2alpha1api.DataDownloadPhasePrepared ssb.Status.Node = r.nodeName @@ -806,56 +751,139 @@ func isDataDownloadInFinalState(dd *velerov2alpha1api.DataDownload) bool { dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseCompleted } -func UpdateDataDownloadWithRetry(ctx context.Context, client client.Client, namespacedName types.NamespacedName, log *logrus.Entry, updateFunc func(dataDownload *velerov2alpha1api.DataDownload)) error { - return wait.PollUntilContextCancel(ctx, time.Second, true, func(ctx context.Context) (done bool, err error) { +func UpdateDataDownloadWithRetry(ctx context.Context, client client.Client, namespacedName types.NamespacedName, log *logrus.Entry, updateFunc func(*velerov2alpha1api.DataDownload) bool) error { + return wait.PollUntilContextCancel(ctx, time.Second, true, func(ctx context.Context) (bool, error) { dd := &velerov2alpha1api.DataDownload{} if err := client.Get(ctx, namespacedName, dd); err != nil { return false, errors.Wrap(err, "getting DataDownload") } - updateFunc(dd) - updateErr := client.Update(ctx, dd) - if updateErr != nil { - if apierrors.IsConflict(updateErr) { - log.Warnf("failed to update datadownload for %s/%s and will retry it", dd.Namespace, dd.Name) - return false, nil + if updateFunc(dd) { + err := client.Update(ctx, dd) + if err != nil { + if apierrors.IsConflict(err) { + log.Warnf("failed to update datadownload for %s/%s and will retry it", dd.Namespace, dd.Name) + return false, nil + } else { + return false, errors.Wrapf(err, "error updating datadownload %s/%s", dd.Namespace, dd.Name) + } } - log.Errorf("failed to update datadownload with error %s for %s/%s", updateErr.Error(), dd.Namespace, dd.Name) - return false, err } return true, nil }) } -func (r *DataDownloadReconciler) AttemptDataDownloadResume(ctx context.Context, cli client.Client, logger *logrus.Entry, ns string) error { - if dataDownloads, err := r.FindDataDownloads(ctx, cli, ns); err != nil { - return errors.Wrapf(err, "failed to find data downloads") - } else { - for i := range dataDownloads { - dd := dataDownloads[i] - if dd.Status.Phase == velerov2alpha1api.DataDownloadPhasePrepared { - // keep doing nothing let controller re-download the data - // the Prepared CR could be still handled by datadownload controller after node-agent restart - logger.WithField("datadownload", dd.GetName()).Debug("find a datadownload with status prepared") - } else if dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseInProgress { - err = UpdateDataDownloadWithRetry(ctx, cli, types.NamespacedName{Namespace: dd.Namespace, Name: dd.Name}, logger.WithField("datadownload", dd.Name), - func(dataDownload *velerov2alpha1api.DataDownload) { - dataDownload.Spec.Cancel = true - dataDownload.Status.Message = fmt.Sprintf("found a datadownload with status %q during the node-agent starting, mark it as cancel", dd.Status.Phase) - }) +var funcResumeCancellableDataRestore = (*DataDownloadReconciler).resumeCancellableDataPath - if err != nil { - logger.WithError(errors.WithStack(err)).Errorf("failed to mark datadownload %q into canceled", dd.GetName()) - continue - } - logger.WithField("datadownload", dd.GetName()).Debug("mark datadownload into canceled") +func (r *DataDownloadReconciler) AttemptDataDownloadResume(ctx context.Context, cli 
client.Client, logger *logrus.Entry, ns string) error {
+	dataDownloads := &velerov2alpha1api.DataDownloadList{}
+	if err := cli.List(ctx, dataDownloads, &client.ListOptions{Namespace: ns}); err != nil {
+		r.logger.WithError(errors.WithStack(err)).Error("failed to list datadownloads")
+		return errors.Wrapf(err, "error to list datadownloads")
+	}
+
+	for i := range dataDownloads.Items {
+		dd := &dataDownloads.Items[i]
+		if dd.Status.Phase == velerov2alpha1api.DataDownloadPhasePrepared {
+			// keep doing nothing let controller re-download the data
+			// the Prepared CR could be still handled by datadownload controller after node-agent restart
+			logger.WithField("datadownload", dd.GetName()).Debug("find a datadownload with status prepared")
+		} else if dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseInProgress {
+			if dd.Status.Node != r.nodeName {
+				logger.WithField("dd", dd.Name).WithField("current node", r.nodeName).Infof("DD should be resumed by another node %s", dd.Status.Node)
+				continue
+			}
+
+			err := funcResumeCancellableDataRestore(r, ctx, dd, logger)
+			if err == nil {
+				continue
+			}
+
+			logger.WithField("datadownload", dd.GetName()).WithError(err).Warn("Failed to resume data path for dd, have to cancel it")
+
+			resumeErr := err
+			err = UpdateDataDownloadWithRetry(ctx, cli, types.NamespacedName{Namespace: dd.Namespace, Name: dd.Name}, logger.WithField("datadownload", dd.Name),
+				func(dataDownload *velerov2alpha1api.DataDownload) bool {
+					if dataDownload.Spec.Cancel {
+						return false
+					}
+
+					dataDownload.Spec.Cancel = true
+					dataDownload.Status.Message = fmt.Sprintf("Resume InProgress datadownload failed with error %v, mark it as cancel", resumeErr)
+
+					return true
+				})
+			if err != nil {
+				logger.WithError(errors.WithStack(err)).Error("Failed to trigger datadownload cancel")
+			}
+		} else if dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseAccepted {
+			r.logger.WithField("datadownload", dd.GetName()).Warn("Cancel dd under Accepted phase")
+
+			err := UpdateDataDownloadWithRetry(ctx, cli, types.NamespacedName{Namespace: dd.Namespace, Name: dd.Name},
+				r.logger.WithField("datadownload", dd.Name), func(dataDownload *velerov2alpha1api.DataDownload) bool {
+					if dataDownload.Spec.Cancel {
+						return false
+					}
+
+					dataDownload.Spec.Cancel = true
+					dataDownload.Status.Message = "Datadownload is in Accepted status during the node-agent starting, mark it as cancel"
+
+					return true
+				})
+			if err != nil {
+				r.logger.WithField("datadownload", dd.GetName()).WithError(err).Error("Failed to trigger datadownload cancel")
+			}
+		}
+	}
 
-	//If the data download is in Accepted status, the expoded PVC may be not created
-	// so we need to mark the data download as canceled for it may not be recoverable
-	r.CancelAcceptedDataDownload(ctx, cli, ns)
+	return nil
+}
+
+func (r *DataDownloadReconciler) resumeCancellableDataPath(ctx context.Context, dd *velerov2alpha1api.DataDownload, log logrus.FieldLogger) error {
+	log.Info("Resume cancelable dataDownload")
+
+	res, err := r.restoreExposer.GetExposed(ctx, getDataDownloadOwnerObject(dd), r.client, r.nodeName, dd.Spec.OperationTimeout.Duration)
+	if err != nil {
+		return errors.Wrapf(err, "error to get exposed volume for dd %s", dd.Name)
+	}
+
+	if res == nil {
+		return errors.Errorf("expose info missed for dd %s", dd.Name)
+	}
+
+	callbacks := datapath.Callbacks{
+		OnCompleted: r.OnDataDownloadCompleted,
+		OnFailed:    r.OnDataDownloadFailed,
+		OnCancelled: r.OnDataDownloadCancelled,
+		OnProgress:  r.OnDataDownloadProgress,
+	}
+
+    asyncBR, err := r.dataPathMgr.CreateMicroServiceBRWatcher(ctx, r.client, r.kubeClient, r.mgr, datapath.TaskTypeRestore, dd.Name, dd.Namespace, res.ByPod.HostingPod.Name, res.ByPod.HostingContainer, dd.Name, callbacks, true, log)
+    if err != nil {
+        return errors.Wrapf(err, "error to create asyncBR watcher for dd %s", dd.Name)
+    }
+
+    resumeComplete := false
+    defer func() {
+        if !resumeComplete {
+            r.closeDataPath(ctx, dd.Name)
+        }
+    }()
+
+    if err := asyncBR.Init(ctx, nil); err != nil {
+        return errors.Wrapf(err, "error to init asyncBR watcher for dd %s", dd.Name)
+    }
+
+    if err := asyncBR.StartRestore(dd.Spec.SnapshotID, datapath.AccessPoint{
+        ByPath: res.ByPod.VolumeName,
+    }, nil); err != nil {
+        return errors.Wrapf(err, "error to resume asyncBR watcher for dd %s", dd.Name)
+    }
+
+    resumeComplete = true
+
+    log.Infof("asyncBR is resumed for dd %s", dd.Name)
+    return nil
 }
diff --git a/pkg/controller/data_download_controller_test.go b/pkg/controller/data_download_controller_test.go
index 385870426..d20db30f0 100644
--- a/pkg/controller/data_download_controller_test.go
+++ b/pkg/controller/data_download_controller_test.go
@@ -904,12 +904,11 @@ func TestUpdateDataDownloadWithRetry(t *testing.T) {
 	testCases := []struct {
 		Name      string
 		needErrs  []bool
+		noChange  bool
 		ExpectErr bool
 	}{
 		{
-			Name:      "SuccessOnFirstAttempt",
-			needErrs:  []bool{false, false, false, false},
-			ExpectErr: false,
+			Name: "SuccessOnFirstAttempt",
 		},
 		{
 			Name: "Error get",
@@ -921,6 +920,11 @@ func TestUpdateDataDownloadWithRetry(t *testing.T) {
 			needErrs:  []bool{false, false, true, false, false},
 			ExpectErr: true,
 		},
+		{
+			Name:     "no change",
+			noChange: true,
+			needErrs: []bool{false, false, true, false, false},
+		},
 		{
 			Name:     "Conflict with error timeout",
 			needErrs: []bool{false, false, false, false, true},
@@ -936,8 +940,14 @@ func TestUpdateDataDownloadWithRetry(t *testing.T) {
 			require.NoError(t, err)
 			err = r.client.Create(ctx, dataDownloadBuilder().Result())
 			require.NoError(t, err)
-			updateFunc := func(dataDownload *velerov2alpha1api.DataDownload) {
+			updateFunc := func(dataDownload *velerov2alpha1api.DataDownload) bool {
+				if tc.noChange {
+					return false
+				}
+
 				dataDownload.Spec.Cancel = true
+
+				return true
 			}
 			err = UpdateDataDownloadWithRetry(ctx, r.client, namespacedName, velerotest.NewLogger().WithField("name", tc.Name), updateFunc)
 			if tc.ExpectErr {
@@ -949,136 +959,115 @@ func TestUpdateDataDownloadWithRetry(t *testing.T) {
 	}
 }
-func TestFindDataDownloads(t *testing.T) {
-	tests := []struct {
-		name            string
-		pod             corev1.Pod
-		du              *velerov2alpha1api.DataDownload
-		expectedUploads []velerov2alpha1api.DataDownload
-		expectedError   bool
-	}{
-		// Test case 1: Pod with matching nodeName and DataDownload label
-		{
-			name: "MatchingPod",
-			pod: corev1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Namespace: "velero",
-					Name:      "pod-1",
-					Labels: map[string]string{
-						velerov1api.DataDownloadLabel: dataDownloadName,
-					},
-				},
-				Spec: corev1.PodSpec{
-					NodeName: "node-1",
-				},
-			},
-			du: dataDownloadBuilder().Result(),
-			expectedUploads: []velerov2alpha1api.DataDownload{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Namespace: "velero",
-						Name:      dataDownloadName,
-					},
-				},
-			},
-			expectedError: false,
-		},
-		// Test case 2: Pod with non-matching nodeName
-		{
-			name: "NonMatchingNodePod",
-			pod: corev1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Namespace: "velero",
-					Name:      "pod-2",
-					Labels: map[string]string{
-						velerov1api.DataDownloadLabel: dataDownloadName,
-					},
-				},
-				Spec: corev1.PodSpec{
-					NodeName: "node-2",
-				},
-			},
-			du: dataDownloadBuilder().Result(),
-			expectedUploads: []velerov2alpha1api.DataDownload{},
-			expectedError:   false,
-		},
-	}
+type ddResumeTestHelper struct {
+	resumeErr    error
+	getExposeErr error
+	exposeResult *exposer.ExposeResult
+	asyncBR      datapath.AsyncBR
+}
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			r, err := initDataDownloadReconcilerWithError(nil)
-			require.NoError(t, err)
-			r.nodeName = "node-1"
-			err = r.client.Create(ctx, test.du)
-			require.NoError(t, err)
-			err = r.client.Create(ctx, &test.pod)
-			require.NoError(t, err)
-			uploads, err := r.FindDataDownloads(context.Background(), r.client, "velero")
+func (dt *ddResumeTestHelper) resumeCancellableDataPath(_ *DataDownloadReconciler, _ context.Context, _ *velerov2alpha1api.DataDownload, _ logrus.FieldLogger) error {
+	return dt.resumeErr
+}
-			if test.expectedError {
-				assert.Error(t, err)
-			} else {
-				assert.NoError(t, err)
-				assert.Equal(t, len(test.expectedUploads), len(uploads))
-			}
-		})
-	}
+func (dt *ddResumeTestHelper) Expose(context.Context, corev1.ObjectReference, string, string, map[string]string, time.Duration) error {
+	return nil
+}
+
+func (dt *ddResumeTestHelper) GetExposed(context.Context, corev1.ObjectReference, kbclient.Client, string, time.Duration) (*exposer.ExposeResult, error) {
+	return dt.exposeResult, dt.getExposeErr
+}
+
+func (dt *ddResumeTestHelper) PeekExposed(context.Context, corev1.ObjectReference) error {
+	return nil
+}
+
+func (dt *ddResumeTestHelper) RebindVolume(context.Context, corev1.ObjectReference, string, string, time.Duration) error {
+	return nil
+}
+
+func (dt *ddResumeTestHelper) CleanUp(context.Context, corev1.ObjectReference) {}
+
+func (dt *ddResumeTestHelper) newMicroServiceBRWatcher(kbclient.Client, kubernetes.Interface, manager.Manager, string, string, string, string, string, string,
+	datapath.Callbacks, logrus.FieldLogger) datapath.AsyncBR {
+	return dt.asyncBR
 }
 
 func TestAttemptDataDownloadResume(t *testing.T) {
 	tests := []struct {
-		name                   string
-		dataUploads            []velerov2alpha1api.DataDownload
-		du                     *velerov2alpha1api.DataDownload
-		pod                    *corev1.Pod
-		needErrs               []bool
-		acceptedDataDownloads  []string
-		prepareddDataDownloads []string
-		cancelledDataDownloads []string
-		expectedError          bool
+		name                    string
+		dataUploads             []velerov2alpha1api.DataDownload
+		dd                      *velerov2alpha1api.DataDownload
+		needErrs                []bool
+		resumeErr               error
+		acceptedDataDownloads   []string
+		prepareddDataDownloads  []string
+		cancelledDataDownloads  []string
+		inProgressDataDownloads []string
+		expectedError           string
 	}{
-		// Test case 1: Process Accepted DataDownload
 		{
-			name: "AcceptedDataDownload",
-			pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Volumes(&corev1.Volume{Name: dataDownloadName}).NodeName("node-1").Labels(map[string]string{
-				velerov1api.DataDownloadLabel: dataDownloadName,
+			name:                   "accepted DataDownload with no dd label",
+			dd:                     dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Result(),
+			cancelledDataDownloads: []string{dataDownloadName},
+			acceptedDataDownloads:  []string{dataDownloadName},
+		},
+		{
+			name:                   "accepted DataDownload in the current node",
+			dd:                     dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Labels(map[string]string{acceptNodeLabelKey: "node-1"}).Result(),
+			cancelledDataDownloads: []string{dataDownloadName},
+			acceptedDataDownloads:  []string{dataDownloadName},
+		},
+		{
+			name: "accepted DataDownload with dd label but is canceled",
+			dd: 
dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Cancel(true).Labels(map[string]string{ + acceptNodeLabelKey: "node-1", }).Result(), - du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Result(), + acceptedDataDownloads: []string{dataDownloadName}, + cancelledDataDownloads: []string{dataDownloadName}, + }, + { + name: "accepted DataDownload with dd label but cancel fail", + dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Labels(map[string]string{ + acceptNodeLabelKey: "node-1", + }).Result(), + needErrs: []bool{false, false, true, false, false, false}, acceptedDataDownloads: []string{dataDownloadName}, - expectedError: false, }, - // Test case 2: Cancel an Accepted DataDownload { - name: "CancelAcceptedDataDownload", - du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Result(), - }, - // Test case 3: Process Accepted Prepared DataDownload - { - name: "PreparedDataDownload", - pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Volumes(&corev1.Volume{Name: dataDownloadName}).NodeName("node-1").Labels(map[string]string{ - velerov1api.DataDownloadLabel: dataDownloadName, - }).Result(), - du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhasePrepared).Result(), + name: "prepared DataDownload", + dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhasePrepared).Result(), prepareddDataDownloads: []string{dataDownloadName}, }, - // Test case 4: Process Accepted InProgress DataDownload { - name: "InProgressDataDownload", - pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Volumes(&corev1.Volume{Name: dataDownloadName}).NodeName("node-1").Labels(map[string]string{ - velerov1api.DataDownloadLabel: dataDownloadName, - }).Result(), - du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhasePrepared).Result(), - prepareddDataDownloads: []string{dataDownloadName}, + name: "InProgress DataDownload, not the current node", + dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseInProgress).Result(), + inProgressDataDownloads: []string{dataDownloadName}, }, - // Test case 5: get resume error { - name: "ResumeError", - pod: builder.ForPod(velerov1api.DefaultNamespace, dataDownloadName).Volumes(&corev1.Volume{Name: dataDownloadName}).NodeName("node-1").Labels(map[string]string{ - velerov1api.DataDownloadLabel: dataDownloadName, - }).Result(), + name: "InProgress DataDownload, no resume error", + dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseInProgress).Node("node-1").Result(), + inProgressDataDownloads: []string{dataDownloadName}, + }, + { + name: "InProgress DataDownload, resume error, cancel error", + dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseInProgress).Node("node-1").Result(), + resumeErr: errors.New("fake-resume-error"), + needErrs: []bool{false, false, true, false, false, false}, + inProgressDataDownloads: []string{dataDownloadName}, + }, + { + name: "InProgress DataDownload, resume error, cancel succeed", + dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseInProgress).Node("node-1").Result(), + resumeErr: errors.New("fake-resume-error"), + cancelledDataDownloads: []string{dataDownloadName}, + inProgressDataDownloads: []string{dataDownloadName}, + }, + { + name: "Error", needErrs: []bool{false, false, false, false, false, true}, - du: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhasePrepared).Result(), - expectedError: true, + dd: 
dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhasePrepared).Result(),
+			expectedError: "error to list datadownloads: List error",
 		},
 	}
@@ -1089,30 +1078,31 @@ func TestAttemptDataDownloadResume(t *testing.T) {
 			r.nodeName = "node-1"
 			require.NoError(t, err)
 			defer func() {
-				r.client.Delete(ctx, test.du, &kbclient.DeleteOptions{})
-				if test.pod != nil {
-					r.client.Delete(ctx, test.pod, &kbclient.DeleteOptions{})
-				}
+				r.client.Delete(ctx, test.dd, &kbclient.DeleteOptions{})
 			}()
-			assert.NoError(t, r.client.Create(ctx, test.du))
-			if test.pod != nil {
-				assert.NoError(t, r.client.Create(ctx, test.pod))
-			}
-			// Run the test
-			err = r.AttemptDataDownloadResume(ctx, r.client, r.logger.WithField("name", test.name), test.du.Namespace)
+			assert.NoError(t, r.client.Create(ctx, test.dd))
-			if test.expectedError {
-				assert.Error(t, err)
+			dt := &ddResumeTestHelper{
+				resumeErr: test.resumeErr,
+			}
+
+			funcResumeCancellableDataRestore = dt.resumeCancellableDataPath
+
+			// Run the test
+			err = r.AttemptDataDownloadResume(ctx, r.client, r.logger.WithField("name", test.name), test.dd.Namespace)
+
+			if test.expectedError != "" {
+				assert.EqualError(t, err, test.expectedError)
 			} else {
 				assert.NoError(t, err)
 				// Verify DataDownload marked as Canceled
 				for _, duName := range test.cancelledDataDownloads {
-					dataUpload := &velerov2alpha1api.DataDownload{}
-					err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload)
+					dataDownload := &velerov2alpha1api.DataDownload{}
+					err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataDownload)
 					require.NoError(t, err)
-					assert.Equal(t, velerov2alpha1api.DataDownloadPhaseCanceled, dataUpload.Status.Phase)
+					assert.True(t, dataDownload.Spec.Cancel)
 				}
 				// Verify DataDownload marked as Accepted
 				for _, duName := range test.acceptedDataDownloads {
@@ -1132,3 +1122,108 @@ func TestAttemptDataDownloadResume(t *testing.T) {
 		})
 	}
 }
+
+func TestResumeCancellableRestore(t *testing.T) {
+	tests := []struct {
+		name             string
+		dataDownloads    []velerov2alpha1api.DataDownload
+		dd               *velerov2alpha1api.DataDownload
+		getExposeErr     error
+		exposeResult     *exposer.ExposeResult
+		createWatcherErr error
+		initWatcherErr   error
+		startWatcherErr  error
+		mockInit         bool
+		mockStart        bool
+		mockClose        bool
+		expectedError    string
+	}{
+		{
+			name:          "get expose failed",
+			dd:            dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseInProgress).Result(),
+			getExposeErr:  errors.New("fake-expose-error"),
+			expectedError: fmt.Sprintf("error to get exposed volume for dd %s: fake-expose-error", dataDownloadName),
+		},
+		{
+			name:          "no expose",
+			dd:            dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Node("node-1").Result(),
+			expectedError: fmt.Sprintf("expose info missed for dd %s", dataDownloadName),
+		},
+		{
+			name: "watcher init error",
+			dd:   dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Node("node-1").Result(),
+			exposeResult: &exposer.ExposeResult{
+				ByPod: exposer.ExposeByPod{
+					HostingPod: &corev1.Pod{},
+				},
+			},
+			mockInit:       true,
+			mockClose:      true,
+			initWatcherErr: errors.New("fake-init-watcher-error"),
+			expectedError:  fmt.Sprintf("error to init asyncBR watcher for dd %s: fake-init-watcher-error", dataDownloadName),
+		},
+		{
+			name: "start watcher error",
+			dd:   dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Node("node-1").Result(),
+			exposeResult: &exposer.ExposeResult{
+				ByPod: exposer.ExposeByPod{
+					HostingPod: 
&corev1.Pod{}, + }, + }, + mockInit: true, + mockStart: true, + mockClose: true, + startWatcherErr: errors.New("fake-start-watcher-error"), + expectedError: fmt.Sprintf("error to resume asyncBR watcher for dd %s: fake-start-watcher-error", dataDownloadName), + }, + { + name: "succeed", + dd: dataDownloadBuilder().Phase(velerov2alpha1api.DataDownloadPhaseAccepted).Node("node-1").Result(), + exposeResult: &exposer.ExposeResult{ + ByPod: exposer.ExposeByPod{ + HostingPod: &corev1.Pod{}, + }, + }, + mockInit: true, + mockStart: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.TODO() + r, err := initDataDownloadReconciler(nil, false) + r.nodeName = "node-1" + require.NoError(t, err) + + mockAsyncBR := datapathmockes.NewAsyncBR(t) + + if test.mockInit { + mockAsyncBR.On("Init", mock.Anything, mock.Anything).Return(test.initWatcherErr) + } + + if test.mockStart { + mockAsyncBR.On("StartRestore", mock.Anything, mock.Anything, mock.Anything).Return(test.startWatcherErr) + } + + if test.mockClose { + mockAsyncBR.On("Close", mock.Anything).Return() + } + + dt := &ddResumeTestHelper{ + getExposeErr: test.getExposeErr, + exposeResult: test.exposeResult, + asyncBR: mockAsyncBR, + } + + r.restoreExposer = dt + + datapath.MicroServiceBRWatcherCreator = dt.newMicroServiceBRWatcher + + err = r.resumeCancellableDataPath(ctx, test.dd, velerotest.NewLogger()) + if test.expectedError != "" { + assert.EqualError(t, err, test.expectedError) + } + }) + } +} diff --git a/pkg/controller/data_upload_controller.go b/pkg/controller/data_upload_controller.go index b10f1f636..c6a15ceca 100644 --- a/pkg/controller/data_upload_controller.go +++ b/pkg/controller/data_upload_controller.go @@ -153,9 +153,17 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request) } else if controllerutil.ContainsFinalizer(du, DataUploadDownloadFinalizer) && !du.Spec.Cancel && !isDataUploadInFinalState(du) { // when delete cr we need to clear up internal resources created by Velero, here we use the cancel mechanism // to help clear up resources instead of clear them directly in case of some conflict with Expose action - if err := UpdateDataUploadWithRetry(ctx, r.client, req.NamespacedName, log, func(dataUpload *velerov2alpha1api.DataUpload) { + log.Warnf("Cancel du under phase %s because it is being deleted", du.Status.Phase) + + if err := UpdateDataUploadWithRetry(ctx, r.client, req.NamespacedName, log, func(dataUpload *velerov2alpha1api.DataUpload) bool { + if dataUpload.Spec.Cancel { + return false + } + dataUpload.Spec.Cancel = true - dataUpload.Status.Message = fmt.Sprintf("found a dataupload %s/%s is being deleted, mark it as cancel", du.Namespace, du.Name) + dataUpload.Status.Message = "Cancel dataupload because it is being deleted" + + return true }); err != nil { log.Errorf("failed to set cancel flag with error %s for %s/%s", err.Error(), du.Namespace, du.Name) return ctrl.Result{}, err @@ -600,9 +608,15 @@ func (r *DataUploadReconciler) findDataUploadForPod(ctx context.Context, podObj } } else if unrecoverable, reason := kube.IsPodUnrecoverable(pod, log); unrecoverable { // let the abnormal backup pod failed early err := UpdateDataUploadWithRetry(context.Background(), r.client, types.NamespacedName{Namespace: du.Namespace, Name: du.Name}, r.logger.WithField("dataupload", du.Name), - func(dataUpload *velerov2alpha1api.DataUpload) { + func(dataUpload *velerov2alpha1api.DataUpload) bool { + if dataUpload.Spec.Cancel { + return false + } + 
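+					// not canceled yet: flip the flag exactly once; repeated pod
+					// events for the same DataUpload take the early return above
+					// and skip a redundant Update call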
dataUpload.Spec.Cancel = true - dataUpload.Status.Message = fmt.Sprintf("dataupload mark as cancel to failed early for exposing pod %s/%s is in abnormal status for reason %s", pod.Namespace, pod.Name, reason) + dataUpload.Status.Message = fmt.Sprintf("Cancel dataupload because the exposing pod %s/%s is in abnormal status for reason %s", pod.Namespace, pod.Name, reason) + + return true }) if err != nil { @@ -623,75 +637,6 @@ func (r *DataUploadReconciler) findDataUploadForPod(ctx context.Context, podObj return []reconcile.Request{request} } -func (r *DataUploadReconciler) FindDataUploadsByPod(ctx context.Context, cli client.Client, ns string) ([]velerov2alpha1api.DataUpload, error) { - pods := &corev1.PodList{} - var dataUploads []velerov2alpha1api.DataUpload - if err := cli.List(ctx, pods, &client.ListOptions{Namespace: ns}); err != nil { - r.logger.WithError(errors.WithStack(err)).Error("failed to list pods on current node") - return nil, errors.Wrapf(err, "failed to list pods on current node") - } - - for _, pod := range pods.Items { - if pod.Spec.NodeName != r.nodeName { - r.logger.Debugf("Pod %s related data upload will not handled by %s nodes", pod.GetName(), r.nodeName) - continue - } - du, err := findDataUploadByPod(cli, pod) - if err != nil { - r.logger.WithError(errors.WithStack(err)).Error("failed to get dataUpload by pod") - continue - } else if du != nil { - dataUploads = append(dataUploads, *du) - } - } - return dataUploads, nil -} - -func (r *DataUploadReconciler) findAcceptDataUploadsByNodeLabel(ctx context.Context, cli client.Client, ns string) ([]velerov2alpha1api.DataUpload, error) { - dataUploads := &velerov2alpha1api.DataUploadList{} - if err := cli.List(ctx, dataUploads, &client.ListOptions{Namespace: ns}); err != nil { - r.logger.WithError(errors.WithStack(err)).Error("failed to list datauploads") - return nil, errors.Wrapf(err, "failed to list datauploads") - } - - var result []velerov2alpha1api.DataUpload - for _, du := range dataUploads.Items { - if du.Status.Phase != velerov2alpha1api.DataUploadPhaseAccepted { - continue - } - if du.Labels[acceptNodeLabelKey] == r.nodeName { - result = append(result, du) - } - } - return result, nil -} - -func (r *DataUploadReconciler) CancelAcceptedDataupload(ctx context.Context, cli client.Client, ns string) { - r.logger.Infof("Reset accepted dataupload for node %s", r.nodeName) - dataUploads, err := r.findAcceptDataUploadsByNodeLabel(ctx, cli, ns) - if err != nil { - r.logger.WithError(err).Error("failed to find dataupload") - return - } - - for _, du := range dataUploads { - if du.Spec.Cancel { - continue - } - err = UpdateDataUploadWithRetry(ctx, cli, types.NamespacedName{Namespace: du.Namespace, Name: du.Name}, r.logger.WithField("dataupload", du.Name), - func(dataUpload *velerov2alpha1api.DataUpload) { - dataUpload.Spec.Cancel = true - dataUpload.Status.Message = fmt.Sprintf("found a dataupload with status %q during the node-agent starting, mark it as cancel", du.Status.Phase) - }) - - r.logger.WithField("dataupload", du.GetName()).Warn(du.Status.Message) - if err != nil { - r.logger.WithError(errors.WithStack(err)).Errorf("failed to mark dataupload %q cancel", du.GetName()) - continue - } - } -} - func (r *DataUploadReconciler) prepareDataUpload(du *velerov2alpha1api.DataUpload) { du.Status.Phase = velerov2alpha1api.DataUploadPhasePrepared du.Status.Node = r.nodeName @@ -903,54 +848,145 @@ func isDataUploadInFinalState(du *velerov2alpha1api.DataUpload) bool { du.Status.Phase == velerov2alpha1api.DataUploadPhaseCompleted } 
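
Both retry helpers in this patch now share one contract: poll once per second, re-read the CR, apply updateFunc, and only issue an Update when updateFunc reports a change; API conflicts are swallowed and retried, while any other error aborts the poll. A minimal caller-side sketch of the helper rewritten below (the cancelDataUpload wrapper and its reason parameter are illustrative, not part of the patch):

    func cancelDataUpload(ctx context.Context, cli client.Client, key types.NamespacedName, log *logrus.Entry, reason string) error {
    	return UpdateDataUploadWithRetry(ctx, cli, key, log, func(du *velerov2alpha1api.DataUpload) bool {
    		if du.Spec.Cancel {
    			return false // already canceled: report "no change" so no Update is issued
    		}
    		du.Spec.Cancel = true
    		du.Status.Message = reason
    		return true
    	})
    }
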
-func UpdateDataUploadWithRetry(ctx context.Context, client client.Client, namespacedName types.NamespacedName, log *logrus.Entry, updateFunc func(dataUpload *velerov2alpha1api.DataUpload)) error {
-	return wait.PollUntilContextCancel(ctx, time.Second, true, func(ctx context.Context) (done bool, err error) {
+func UpdateDataUploadWithRetry(ctx context.Context, client client.Client, namespacedName types.NamespacedName, log *logrus.Entry, updateFunc func(*velerov2alpha1api.DataUpload) bool) error {
+	return wait.PollUntilContextCancel(ctx, time.Second, true, func(ctx context.Context) (bool, error) {
 		du := &velerov2alpha1api.DataUpload{}
 		if err := client.Get(ctx, namespacedName, du); err != nil {
 			return false, errors.Wrap(err, "getting DataUpload")
 		}
-		updateFunc(du)
-		updateErr := client.Update(ctx, du)
-		if updateErr != nil {
-			if apierrors.IsConflict(updateErr) {
-				log.Warnf("failed to update dataupload for %s/%s and will retry it", du.Namespace, du.Name)
-				return false, nil
+		if updateFunc(du) {
+			err := client.Update(ctx, du)
+			if err != nil {
+				if apierrors.IsConflict(err) {
+					log.Warnf("failed to update dataupload for %s/%s and will retry it", du.Namespace, du.Name)
+					return false, nil
+				} else {
+					return false, errors.Wrapf(err, "error updating dataupload %s/%s", du.Namespace, du.Name)
+				}
 			}
-			log.Errorf("failed to update dataupload with error %s for %s/%s", updateErr.Error(), du.Namespace, du.Name)
-			return false, err
 		}
+
 		return true, nil
 	})
 }
-func (r *DataUploadReconciler) AttemptDataUploadResume(ctx context.Context, cli client.Client, logger *logrus.Entry, ns string) error {
-	if dataUploads, err := r.FindDataUploadsByPod(ctx, cli, ns); err != nil {
-		return errors.Wrap(err, "failed to find data uploads")
-	} else {
-		for _, du := range dataUploads {
-			if du.Status.Phase == velerov2alpha1api.DataUploadPhasePrepared {
-				// keep doing nothing let controller re-download the data
-				// the Prepared CR could be still handled by dataupload controller after node-agent restart
-				logger.WithField("dataupload", du.GetName()).Debug("find a dataupload with status prepared")
-			} else if du.Status.Phase == velerov2alpha1api.DataUploadPhaseInProgress {
-				err = UpdateDataUploadWithRetry(ctx, cli, types.NamespacedName{Namespace: du.Namespace, Name: du.Name}, logger.WithField("dataupload", du.Name),
-					func(dataUpload *velerov2alpha1api.DataUpload) {
-						dataUpload.Spec.Cancel = true
-						dataUpload.Status.Message = fmt.Sprintf("found a dataupload with status %q during the node-agent starting, mark it as cancel", du.Status.Phase)
-					})
+var funcResumeCancellableDataBackup = (*DataUploadReconciler).resumeCancellableDataPath
-				if err != nil {
-					logger.WithError(errors.WithStack(err)).Errorf("failed to mark dataupload %q into canceled", du.GetName())
-					continue
-				}
-				logger.WithField("dataupload", du.GetName()).Debug("mark dataupload into canceled")
+func (r *DataUploadReconciler) AttemptDataUploadResume(ctx context.Context, cli client.Client, logger *logrus.Entry, ns string) error {
+	dataUploads := &velerov2alpha1api.DataUploadList{}
+	if err := cli.List(ctx, dataUploads, &client.ListOptions{Namespace: ns}); err != nil {
+		r.logger.WithError(errors.WithStack(err)).Error("failed to list datauploads")
+		return errors.Wrapf(err, "error to list datauploads")
+	}
+
+	for i := range dataUploads.Items {
+		du := &dataUploads.Items[i]
+		if du.Status.Phase == velerov2alpha1api.DataUploadPhasePrepared {
+			// keep doing nothing; let the controller re-process the data
+			// the Prepared CR could still be handled 
by dataupload controller after node-agent restart + logger.WithField("dataupload", du.GetName()).Debug("find a dataupload with status prepared") + } else if du.Status.Phase == velerov2alpha1api.DataUploadPhaseInProgress { + if du.Status.Node != r.nodeName { + logger.WithField("du", du.Name).WithField("current node", r.nodeName).Infof("DU should be resumed by another node %s", du.Status.Node) + continue + } + + err := funcResumeCancellableDataBackup(r, ctx, du, logger) + if err == nil { + continue + } + + logger.WithField("dataupload", du.GetName()).WithError(err).Warn("Failed to resume data path for du, have to cancel it") + + resumeErr := err + err = UpdateDataUploadWithRetry(ctx, cli, types.NamespacedName{Namespace: du.Namespace, Name: du.Name}, logger.WithField("dataupload", du.Name), + func(dataUpload *velerov2alpha1api.DataUpload) bool { + if dataUpload.Spec.Cancel { + return false + } + + dataUpload.Spec.Cancel = true + dataUpload.Status.Message = fmt.Sprintf("Resume InProgress dataupload failed with error %v, mark it as cancel", resumeErr) + + return true + }) + if err != nil { + logger.WithField("dataupload", du.GetName()).WithError(errors.WithStack(err)).Error("Failed to trigger dataupload cancel") + } + } else if du.Status.Phase == velerov2alpha1api.DataUploadPhaseAccepted { + r.logger.WithField("dataupload", du.GetName()).Warn("Cancel du under Accepted phase") + + err := UpdateDataUploadWithRetry(ctx, cli, types.NamespacedName{Namespace: du.Namespace, Name: du.Name}, r.logger.WithField("dataupload", du.Name), + func(dataUpload *velerov2alpha1api.DataUpload) bool { + if dataUpload.Spec.Cancel { + return false + } + + dataUpload.Spec.Cancel = true + dataUpload.Status.Message = "Dataupload is in Accepted status during the node-agent starting, mark it as cancel" + + return true + }) + if err != nil { + r.logger.WithField("dataupload", du.GetName()).WithError(errors.WithStack(err)).Error("Failed to trigger dataupload cancel") } } } - //If the data upload is in Accepted status, the volume snapshot may be deleted and the exposed pod may not be created - // so we need to mark the data upload as canceled for it may not be recoverable - r.CancelAcceptedDataupload(ctx, cli, ns) + return nil +} + +func (r *DataUploadReconciler) resumeCancellableDataPath(ctx context.Context, du *velerov2alpha1api.DataUpload, log logrus.FieldLogger) error { + log.Info("Resume cancelable dataUpload") + + ep, ok := r.snapshotExposerList[du.Spec.SnapshotType] + if !ok { + return errors.Errorf("error to find exposer for du %s", du.Name) + } + + waitExposePara := r.setupWaitExposePara(du) + res, err := ep.GetExposed(ctx, getOwnerObject(du), du.Spec.OperationTimeout.Duration, waitExposePara) + if err != nil { + return errors.Wrapf(err, "error to get exposed snapshot for du %s", du.Name) + } + + if res == nil { + return errors.Errorf("expose info missed for du %s", du.Name) + } + + callbacks := datapath.Callbacks{ + OnCompleted: r.OnDataUploadCompleted, + OnFailed: r.OnDataUploadFailed, + OnCancelled: r.OnDataUploadCancelled, + OnProgress: r.OnDataUploadProgress, + } + + asyncBR, err := r.dataPathMgr.CreateMicroServiceBRWatcher(ctx, r.client, r.kubeClient, r.mgr, datapath.TaskTypeBackup, du.Name, du.Namespace, res.ByPod.HostingPod.Name, res.ByPod.HostingContainer, du.Name, callbacks, true, log) + if err != nil { + return errors.Wrapf(err, "error to create asyncBR watcher for du %s", du.Name) + } + + resumeComplete := false + defer func() { + if !resumeComplete { + r.closeDataPath(ctx, du.Name) + } + }() + + if err 
:= asyncBR.Init(ctx, nil); err != nil { + return errors.Wrapf(err, "error to init asyncBR watcher for du %s", du.Name) + } + + if err := asyncBR.StartBackup(datapath.AccessPoint{ + ByPath: res.ByPod.VolumeName, + }, du.Spec.DataMoverConfig, nil); err != nil { + return errors.Wrapf(err, "error to resume asyncBR watcher for du %s", du.Name) + } + + resumeComplete = true + + log.Infof("asyncBR is resumed for du %s", du.Name) + return nil } diff --git a/pkg/controller/data_upload_controller_test.go b/pkg/controller/data_upload_controller_test.go index ea7603b90..8df98b60d 100644 --- a/pkg/controller/data_upload_controller_test.go +++ b/pkg/controller/data_upload_controller_test.go @@ -27,6 +27,7 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -51,6 +52,7 @@ import ( velerov2alpha1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/datapath" + datapathmocks "github.com/vmware-tanzu/velero/pkg/datapath/mocks" "github.com/vmware-tanzu/velero/pkg/exposer" "github.com/vmware-tanzu/velero/pkg/metrics" velerotest "github.com/vmware-tanzu/velero/pkg/test" @@ -966,12 +968,11 @@ func TestUpdateDataUploadWithRetry(t *testing.T) { testCases := []struct { Name string needErrs []bool + noChange bool ExpectErr bool }{ { - Name: "SuccessOnFirstAttempt", - needErrs: []bool{false, false, false, false}, - ExpectErr: false, + Name: "SuccessOnFirstAttempt", }, { Name: "Error get", @@ -983,6 +984,11 @@ func TestUpdateDataUploadWithRetry(t *testing.T) { needErrs: []bool{false, false, true, false, false}, ExpectErr: true, }, + { + Name: "no change", + noChange: true, + needErrs: []bool{false, false, true, false, false}, + }, { Name: "Conflict with error timeout", needErrs: []bool{false, false, false, false, true}, @@ -998,8 +1004,13 @@ func TestUpdateDataUploadWithRetry(t *testing.T) { require.NoError(t, err) err = r.client.Create(ctx, dataUploadBuilder().Result()) require.NoError(t, err) - updateFunc := func(dataDownload *velerov2alpha1api.DataUpload) { + updateFunc := func(dataDownload *velerov2alpha1api.DataUpload) bool { + if tc.noChange { + return false + } + dataDownload.Spec.Cancel = true + return true } err = UpdateDataUploadWithRetry(ctx, r.client, namespacedName, velerotest.NewLogger().WithField("name", tc.Name), updateFunc) if tc.ExpectErr { @@ -1011,135 +1022,107 @@ func TestUpdateDataUploadWithRetry(t *testing.T) { } } -func TestFindDataUploads(t *testing.T) { - tests := []struct { - name string - pod corev1.Pod - du *velerov2alpha1api.DataUpload - expectedUploads []velerov2alpha1api.DataUpload - expectedError bool - }{ - // Test case 1: Pod with matching nodeName and DataUpload label - { - name: "MatchingPod", - pod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "velero", - Name: "pod-1", - Labels: map[string]string{ - velerov1api.DataUploadLabel: dataUploadName, - }, - }, - Spec: corev1.PodSpec{ - NodeName: "node-1", - }, - }, - du: dataUploadBuilder().Result(), - expectedUploads: []velerov2alpha1api.DataUpload{ - { - ObjectMeta: metav1.ObjectMeta{ - Namespace: "velero", - Name: dataUploadName, - }, - }, - }, - expectedError: false, - }, - // Test case 2: Pod with non-matching nodeName - { - name: "NonMatchingNodePod", - pod: corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "velero", - Name: "pod-2", - 
Labels: map[string]string{ - velerov1api.DataUploadLabel: dataUploadName, - }, - }, - Spec: corev1.PodSpec{ - NodeName: "node-2", - }, - }, - du: dataUploadBuilder().Result(), - expectedUploads: []velerov2alpha1api.DataUpload{}, - expectedError: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - r, err := initDataUploaderReconcilerWithError() - require.NoError(t, err) - r.nodeName = "node-1" - err = r.client.Create(ctx, test.du) - require.NoError(t, err) - err = r.client.Create(ctx, &test.pod) - require.NoError(t, err) - uploads, err := r.FindDataUploadsByPod(context.Background(), r.client, "velero") - - if test.expectedError { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, len(test.expectedUploads), len(uploads)) - } - }) - } +type duResumeTestHelper struct { + resumeErr error + getExposeErr error + exposeResult *exposer.ExposeResult + asyncBR datapath.AsyncBR } + +func (dt *duResumeTestHelper) resumeCancellableDataPath(_ *DataUploadReconciler, _ context.Context, _ *velerov2alpha1api.DataUpload, _ logrus.FieldLogger) error { + return dt.resumeErr +} + +func (dt *duResumeTestHelper) Expose(context.Context, corev1.ObjectReference, interface{}) error { + return nil +} + +func (dt *duResumeTestHelper) GetExposed(context.Context, corev1.ObjectReference, time.Duration, interface{}) (*exposer.ExposeResult, error) { + return dt.exposeResult, dt.getExposeErr +} + +func (dt *duResumeTestHelper) PeekExposed(context.Context, corev1.ObjectReference) error { + return nil +} + +func (dt *duResumeTestHelper) CleanUp(context.Context, corev1.ObjectReference, string, string) {} + +func (dt *duResumeTestHelper) newMicroServiceBRWatcher(kbclient.Client, kubernetes.Interface, manager.Manager, string, string, string, string, string, string, + datapath.Callbacks, logrus.FieldLogger) datapath.AsyncBR { + return dt.asyncBR +} + func TestAttemptDataUploadResume(t *testing.T) { tests := []struct { - name string - dataUploads []velerov2alpha1api.DataUpload - du *velerov2alpha1api.DataUpload - pod *corev1.Pod - needErrs []bool - acceptedDataUploads []string - prepareddDataUploads []string - cancelledDataUploads []string - expectedError bool + name string + dataUploads []velerov2alpha1api.DataUpload + du *velerov2alpha1api.DataUpload + needErrs []bool + acceptedDataUploads []string + prepareddDataUploads []string + cancelledDataUploads []string + inProgressDataUploads []string + resumeErr error + expectedError string }{ - // Test case 1: Process Accepted DataUpload { - name: "AcceptedDataUpload", - pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).NodeName("node-1").Labels(map[string]string{ - velerov1api.DataUploadLabel: dataUploadName, - }).Result(), - du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Result(), + name: "accepted DataUpload in other node", + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Result(), + cancelledDataUploads: []string{dataUploadName}, + acceptedDataUploads: []string{dataUploadName}, + }, + { + name: "accepted DataUpload in the current node", + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Labels(map[string]string{acceptNodeLabelKey: "node-1"}).Result(), + cancelledDataUploads: []string{dataUploadName}, + acceptedDataUploads: []string{dataUploadName}, + }, + { + name: "accepted DataUpload in the current node but canceled", + du: 
dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Labels(map[string]string{acceptNodeLabelKey: "node-1"}).Cancel(true).Result(), + cancelledDataUploads: []string{dataUploadName}, + acceptedDataUploads: []string{dataUploadName}, + }, + { + name: "accepted DataUpload in the current node but update error", + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Labels(map[string]string{acceptNodeLabelKey: "node-1"}).Result(), + needErrs: []bool{false, false, true, false, false, false}, acceptedDataUploads: []string{dataUploadName}, - expectedError: false, }, - // Test case 2: Cancel an Accepted DataUpload { - name: "CancelAcceptedDataUpload", - du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Result(), - }, - // Test case 3: Process Accepted Prepared DataUpload - { - name: "PreparedDataUpload", - pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).NodeName("node-1").Labels(map[string]string{ - velerov1api.DataUploadLabel: dataUploadName, - }).Result(), + name: "prepared DataUpload", du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).Result(), prepareddDataUploads: []string{dataUploadName}, }, - // Test case 4: Process Accepted InProgress DataUpload { - name: "InProgressDataUpload", - pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).NodeName("node-1").Labels(map[string]string{ - velerov1api.DataUploadLabel: dataUploadName, - }).Result(), - du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).Result(), - prepareddDataUploads: []string{dataUploadName}, + name: "InProgress DataUpload, not the current node", + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).Result(), + inProgressDataUploads: []string{dataUploadName}, }, - // Test case 5: get resume error { - name: "ResumeError", - pod: builder.ForPod(velerov1api.DefaultNamespace, dataUploadName).Volumes(&corev1.Volume{Name: "dataupload-1"}).NodeName("node-1").Labels(map[string]string{ - velerov1api.DataUploadLabel: dataUploadName, - }).Result(), + name: "InProgress DataUpload, resume error and update error", + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).Node("node-1").Result(), + needErrs: []bool{false, false, true, false, false, false}, + resumeErr: errors.New("fake-resume-error"), + inProgressDataUploads: []string{dataUploadName}, + }, + { + name: "InProgress DataUpload, resume error and update succeed", + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).Node("node-1").Result(), + resumeErr: errors.New("fake-resume-error"), + cancelledDataUploads: []string{dataUploadName}, + inProgressDataUploads: []string{dataUploadName}, + }, + { + name: "InProgress DataUpload and resume succeed", + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).Node("node-1").Result(), + inProgressDataUploads: []string{dataUploadName}, + }, + { + name: "Error", needErrs: []bool{false, false, false, false, false, true}, du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhasePrepared).Result(), - expectedError: true, + expectedError: "error to list datauploads: List error", }, } @@ -1149,22 +1132,20 @@ func TestAttemptDataUploadResume(t *testing.T) { r, err := initDataUploaderReconciler(test.needErrs...) 
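 			// funcResumeCancellableDataBackup is a package-level function variable
 			// precisely so this test can substitute duResumeTestHelper's stub below
 			// and drive the cancel-on-resume-failure branch without a real data mover pod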
r.nodeName = "node-1" require.NoError(t, err) - defer func() { - r.client.Delete(ctx, test.du, &kbclient.DeleteOptions{}) - if test.pod != nil { - r.client.Delete(ctx, test.pod, &kbclient.DeleteOptions{}) - } - }() assert.NoError(t, r.client.Create(ctx, test.du)) - if test.pod != nil { - assert.NoError(t, r.client.Create(ctx, test.pod)) + + dt := &duResumeTestHelper{ + resumeErr: test.resumeErr, } + + funcResumeCancellableDataBackup = dt.resumeCancellableDataPath + // Run the test err = r.AttemptDataUploadResume(ctx, r.client, r.logger.WithField("name", test.name), test.du.Namespace) - if test.expectedError { - assert.Error(t, err) + if test.expectedError != "" { + assert.EqualError(t, err, test.expectedError) } else { assert.NoError(t, err) @@ -1173,7 +1154,7 @@ func TestAttemptDataUploadResume(t *testing.T) { dataUpload := &velerov2alpha1api.DataUpload{} err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) require.NoError(t, err) - assert.Equal(t, velerov2alpha1api.DataUploadPhaseCanceled, dataUpload.Status.Phase) + assert.True(t, dataUpload.Spec.Cancel) } // Verify DataUploads marked as Accepted for _, duName := range test.acceptedDataUploads { @@ -1189,6 +1170,123 @@ func TestAttemptDataUploadResume(t *testing.T) { require.NoError(t, err) assert.Equal(t, velerov2alpha1api.DataUploadPhasePrepared, dataUpload.Status.Phase) } + // Verify DataUploads marked as InProgress + for _, duName := range test.inProgressDataUploads { + dataUpload := &velerov2alpha1api.DataUpload{} + err := r.client.Get(context.Background(), types.NamespacedName{Namespace: "velero", Name: duName}, dataUpload) + require.NoError(t, err) + assert.Equal(t, velerov2alpha1api.DataUploadPhaseInProgress, dataUpload.Status.Phase) + } + } + }) + } +} + +func TestResumeCancellableBackup(t *testing.T) { + tests := []struct { + name string + dataUploads []velerov2alpha1api.DataUpload + du *velerov2alpha1api.DataUpload + getExposeErr error + exposeResult *exposer.ExposeResult + createWatcherErr error + initWatcherErr error + startWatcherErr error + mockInit bool + mockStart bool + mockClose bool + expectedError string + }{ + { + name: "not find exposer", + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).SnapshotType("").Result(), + expectedError: fmt.Sprintf("error to find exposer for du %s", dataUploadName), + }, + { + name: "get expose failed", + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseInProgress).SnapshotType(velerov2alpha1api.SnapshotTypeCSI).Result(), + getExposeErr: errors.New("fake-expose-error"), + expectedError: fmt.Sprintf("error to get exposed snapshot for du %s: fake-expose-error", dataUploadName), + }, + { + name: "no expose", + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Node("node-1").Result(), + expectedError: fmt.Sprintf("expose info missed for du %s", dataUploadName), + }, + { + name: "watcher init error", + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Node("node-1").Result(), + exposeResult: &exposer.ExposeResult{ + ByPod: exposer.ExposeByPod{ + HostingPod: &corev1.Pod{}, + }, + }, + mockInit: true, + mockClose: true, + initWatcherErr: errors.New("fake-init-watcher-error"), + expectedError: fmt.Sprintf("error to init asyncBR watcher for du %s: fake-init-watcher-error", dataUploadName), + }, + { + name: "start watcher error", + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Node("node-1").Result(), + exposeResult: 
&exposer.ExposeResult{ + ByPod: exposer.ExposeByPod{ + HostingPod: &corev1.Pod{}, + }, + }, + mockInit: true, + mockStart: true, + mockClose: true, + startWatcherErr: errors.New("fake-start-watcher-error"), + expectedError: fmt.Sprintf("error to resume asyncBR watcher for du %s: fake-start-watcher-error", dataUploadName), + }, + { + name: "succeed", + du: dataUploadBuilder().Phase(velerov2alpha1api.DataUploadPhaseAccepted).Node("node-1").Result(), + exposeResult: &exposer.ExposeResult{ + ByPod: exposer.ExposeByPod{ + HostingPod: &corev1.Pod{}, + }, + }, + mockInit: true, + mockStart: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.TODO() + r, err := initDataUploaderReconciler() + r.nodeName = "node-1" + require.NoError(t, err) + + mockAsyncBR := datapathmocks.NewAsyncBR(t) + + if test.mockInit { + mockAsyncBR.On("Init", mock.Anything, mock.Anything).Return(test.initWatcherErr) + } + + if test.mockStart { + mockAsyncBR.On("StartBackup", mock.Anything, mock.Anything, mock.Anything).Return(test.startWatcherErr) + } + + if test.mockClose { + mockAsyncBR.On("Close", mock.Anything).Return() + } + + dt := &duResumeTestHelper{ + getExposeErr: test.getExposeErr, + exposeResult: test.exposeResult, + asyncBR: mockAsyncBR, + } + + r.snapshotExposerList[velerov2alpha1api.SnapshotTypeCSI] = dt + + datapath.MicroServiceBRWatcherCreator = dt.newMicroServiceBRWatcher + + err = r.resumeCancellableDataPath(ctx, test.du, velerotest.NewLogger()) + if test.expectedError != "" { + assert.EqualError(t, err, test.expectedError) } }) } diff --git a/pkg/podvolume/backupper.go b/pkg/podvolume/backupper.go index 8d06cb222..5576f4e40 100644 --- a/pkg/podvolume/backupper.go +++ b/pkg/podvolume/backupper.go @@ -36,6 +36,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/label" "github.com/vmware-tanzu/velero/pkg/nodeagent" "github.com/vmware-tanzu/velero/pkg/repository" + "github.com/vmware-tanzu/velero/pkg/uploader" uploaderutil "github.com/vmware-tanzu/velero/pkg/uploader/util" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/kube" @@ -163,10 +164,13 @@ func (b *backupper) getMatchAction(resPolicies *resourcepolicies.Policies, pvc * return nil, errors.Errorf("failed to check resource policies for empty volume") } +var funcGetRepositoryType = getRepositoryType + func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.Pod, volumesToBackup []string, resPolicies *resourcepolicies.Policies, log logrus.FieldLogger) ([]*velerov1api.PodVolumeBackup, *PVCBackupSummary, []error) { if len(volumesToBackup) == 0 { return nil, nil, nil } + log.Infof("pod %s/%s has volumes to backup: %v", pod.Namespace, pod.Name, volumesToBackup) var ( @@ -189,6 +193,13 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. } } + if msg, err := uploader.ValidateUploaderType(b.uploaderType); err != nil { + skipAllPodVolumes(pod, volumesToBackup, err, pvcSummary, log) + return nil, pvcSummary, []error{err} + } else if msg != "" { + log.Warn(msg) + } + if err := kube.IsPodRunning(pod); err != nil { skipAllPodVolumes(pod, volumesToBackup, err, pvcSummary, log) return nil, pvcSummary, nil @@ -196,18 +207,21 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. 
err := nodeagent.IsRunningInNode(b.ctx, backup.Namespace, pod.Spec.NodeName, b.crClient) if err != nil { - return nil, nil, []error{err} + skipAllPodVolumes(pod, volumesToBackup, err, pvcSummary, log) + return nil, pvcSummary, []error{err} } - repositoryType := getRepositoryType(b.uploaderType) + repositoryType := funcGetRepositoryType(b.uploaderType) if repositoryType == "" { err := errors.Errorf("empty repository type, uploader %s", b.uploaderType) - return nil, nil, []error{err} + skipAllPodVolumes(pod, volumesToBackup, err, pvcSummary, log) + return nil, pvcSummary, []error{err} } repo, err := b.repoEnsurer.EnsureRepo(b.ctx, backup.Namespace, pod.Namespace, backup.Spec.StorageLocation, repositoryType) if err != nil { - return nil, nil, []error{err} + skipAllPodVolumes(pod, volumesToBackup, err, pvcSummary, log) + return nil, pvcSummary, []error{err} } // get a single non-exclusive lock since we'll wait for all individual diff --git a/pkg/podvolume/backupper_test.go b/pkg/podvolume/backupper_test.go index 06cec20de..16d4ce286 100644 --- a/pkg/podvolume/backupper_test.go +++ b/pkg/podvolume/backupper_test.go @@ -309,22 +309,38 @@ func TestBackupPodVolumes(t *testing.T) { corev1api.AddToScheme(scheme) tests := []struct { - name string - bsl string - uploaderType string - volumes []string - sourcePod *corev1api.Pod - kubeClientObj []runtime.Object - ctlClientObj []runtime.Object - veleroClientObj []runtime.Object - veleroReactors []reactor - runtimeScheme *runtime.Scheme - pvbs int - errs []string + name string + bsl string + uploaderType string + volumes []string + sourcePod *corev1api.Pod + kubeClientObj []runtime.Object + ctlClientObj []runtime.Object + veleroClientObj []runtime.Object + veleroReactors []reactor + runtimeScheme *runtime.Scheme + pvbs int + mockGetRepositoryType bool + errs []string }{ { name: "empty volume list", }, + { + name: "wrong uploader type", + volumes: []string{ + "fake-volume-1", + "fake-volume-2", + }, + sourcePod: createPodObj(true, false, false, 2), + kubeClientObj: []runtime.Object{ + createNodeAgentPodObj(true), + }, + uploaderType: "fake-uploader-type", + errs: []string{ + "invalid uploader type 'fake-uploader-type', valid upload types are: 'restic', 'kopia'", + }, + }, { name: "pod is not running", volumes: []string{ @@ -348,7 +364,8 @@ func TestBackupPodVolumes(t *testing.T) { "fake-volume-1", "fake-volume-2", }, - sourcePod: createPodObj(true, false, false, 2), + sourcePod: createPodObj(true, false, false, 2), + uploaderType: "kopia", errs: []string{ "daemonset pod not found in running state in node fake-node-name", }, @@ -363,9 +380,10 @@ func TestBackupPodVolumes(t *testing.T) { kubeClientObj: []runtime.Object{ createNodeAgentPodObj(true), }, - uploaderType: "fake-uploader-type", + uploaderType: "kopia", + mockGetRepositoryType: true, errs: []string{ - "empty repository type, uploader fake-uploader-type", + "empty repository type, uploader kopia", }, }, { @@ -542,6 +560,12 @@ func TestBackupPodVolumes(t *testing.T) { require.NoError(t, err) + if test.mockGetRepositoryType { + funcGetRepositoryType = func(string) string { return "" } + } else { + funcGetRepositoryType = getRepositoryType + } + pvbs, _, errs := bp.BackupPodVolumes(backupObj, test.sourcePod, test.volumes, nil, velerotest.NewLogger()) if errs == nil { diff --git a/pkg/repository/provider/unified_repo.go b/pkg/repository/provider/unified_repo.go index a72ecdad4..ac77e5b66 100644 --- a/pkg/repository/provider/unified_repo.go +++ b/pkg/repository/provider/unified_repo.go @@ -53,7 +53,7 @@ 
var getGCPCredentials = repoconfig.GetGCPCredentials var getS3BucketRegion = repoconfig.GetAWSBucketRegion type localFuncTable struct { - getStorageVariables func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) + getStorageVariables func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) getStorageCredentials func(*velerov1api.BackupStorageLocation, credentials.FileStore) (map[string]string, error) } @@ -397,7 +397,7 @@ func (urp *unifiedRepoProvider) GetStoreOptions(param interface{}) (map[string]s return map[string]string{}, errors.Errorf("invalid parameter, expect %T, actual %T", RepoParam{}, param) } - storeVar, err := funcTable.getStorageVariables(repoParam.BackupLocation, urp.repoBackend, repoParam.BackupRepo.Spec.VolumeNamespace) + storeVar, err := funcTable.getStorageVariables(repoParam.BackupLocation, urp.repoBackend, repoParam.BackupRepo.Spec.VolumeNamespace, repoParam.BackupRepo.Spec.RepositoryConfig) if err != nil { return map[string]string{}, errors.Wrap(err, "error to get storage variables") } @@ -498,7 +498,7 @@ func getStorageCredentials(backupLocation *velerov1api.BackupStorageLocation, cr return result, nil } -func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repoBackend string, repoName string) (map[string]string, error) { +func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repoBackend string, repoName string, backupRepoConfig map[string]string) (map[string]string, error) { result := make(map[string]string) backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider, backupLocation.Spec.Config) @@ -568,6 +568,12 @@ func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repo result[udmrepo.StoreOptionOssRegion] = strings.Trim(region, "/") result[udmrepo.StoreOptionFsPath] = config["fspath"] + if backupRepoConfig != nil { + if v, found := backupRepoConfig[udmrepo.StoreOptionCacheLimit]; found { + result[udmrepo.StoreOptionCacheLimit] = v + } + } + return result, nil } diff --git a/pkg/repository/provider/unified_repo_test.go b/pkg/repository/provider/unified_repo_test.go index 5d59c151b..a5063bbbf 100644 --- a/pkg/repository/provider/unified_repo_test.go +++ b/pkg/repository/provider/unified_repo_test.go @@ -221,6 +221,7 @@ func TestGetStorageVariables(t *testing.T) { credFileStore *credmock.FileStore repoName string repoBackend string + repoConfig map[string]string getS3BucketRegion func(string) (string, error) expected map[string]string expectedErr string @@ -435,13 +436,36 @@ func TestGetStorageVariables(t *testing.T) { "region": "", }, }, + { + name: "fs with repo config", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/fs", + Config: map[string]string{ + "fspath": "fake-path", + "prefix": "fake-prefix", + }, + }, + }, + repoBackend: "fake-repo-type", + repoConfig: map[string]string{ + udmrepo.StoreOptionCacheLimit: "1000", + }, + expected: map[string]string{ + "fspath": "fake-path", + "bucket": "", + "prefix": "fake-prefix/fake-repo-type/", + "region": "", + "cacheLimitMB": "1000", + }, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { getS3BucketRegion = tc.getS3BucketRegion - actual, err := getStorageVariables(&tc.backupLocation, tc.repoBackend, tc.repoName) + actual, err := getStorageVariables(&tc.backupLocation, tc.repoBackend, tc.repoName, tc.repoConfig) require.Equal(t, tc.expected, actual) @@ -530,7 +554,7 
@@ func TestGetStoreOptions(t *testing.T) { BackupRepo: &velerov1api.BackupRepository{}, }, funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, errors.New("fake-error-2") }, }, @@ -544,7 +568,7 @@ func TestGetStoreOptions(t *testing.T) { BackupRepo: &velerov1api.BackupRepository{}, }, funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -604,7 +628,7 @@ func TestPrepareRepo(t *testing.T) { repoService: new(reposervicenmocks.BackupRepoService), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, errors.New("fake-store-option-error") }, }, @@ -615,7 +639,7 @@ func TestPrepareRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -635,7 +659,7 @@ func TestPrepareRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -656,7 +680,7 @@ func TestPrepareRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -733,7 +757,7 @@ func TestForget(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, 
velerocredentials.FileStore) (map[string]string, error) { @@ -757,7 +781,7 @@ func TestForget(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -785,7 +809,7 @@ func TestForget(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -883,7 +907,7 @@ func TestBatchForget(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -907,7 +931,7 @@ func TestBatchForget(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -936,7 +960,7 @@ func TestBatchForget(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -1032,7 +1056,7 @@ func TestInitRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -1050,7 +1074,7 @@ func TestInitRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, 
string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -1120,7 +1144,7 @@ func TestConnectToRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -1138,7 +1162,7 @@ func TestConnectToRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -1212,7 +1236,7 @@ func TestBoostRepoConnect(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -1239,7 +1263,7 @@ func TestBoostRepoConnect(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -1265,7 +1289,7 @@ func TestBoostRepoConnect(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -1352,7 +1376,7 @@ func TestPruneRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: 
func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -1370,7 +1394,7 @@ func TestPruneRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, map[string]string) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { diff --git a/pkg/repository/udmrepo/kopialib/backend/common.go b/pkg/repository/udmrepo/kopialib/backend/common.go index 2896c068f..646811da9 100644 --- a/pkg/repository/udmrepo/kopialib/backend/common.go +++ b/pkg/repository/udmrepo/kopialib/backend/common.go @@ -33,8 +33,7 @@ import ( ) const ( - maxDataCacheMB = 2000 - maxMetadataCacheMB = 2000 + defaultCacheLimitMB = 5000 maxCacheDurationSecond = 30 ) @@ -67,11 +66,21 @@ func SetupNewRepositoryOptions(ctx context.Context, flags map[string]string) rep // SetupConnectOptions setups the options when connecting to an existing Kopia repository func SetupConnectOptions(ctx context.Context, repoOptions udmrepo.RepoOptions) repo.ConnectOptions { + cacheLimit := optionalHaveIntWithDefault(ctx, udmrepo.StoreOptionCacheLimit, repoOptions.StorageOptions, defaultCacheLimitMB) << 20 + + // 80% for data cache and 20% for metadata cache and align to KB + dataCacheLimit := (cacheLimit / 5 * 4) >> 10 + metadataCacheLimit := (cacheLimit / 5) >> 10 + return repo.ConnectOptions{ CachingOptions: content.CachingOptions{ - ContentCacheSizeBytes: maxDataCacheMB << 20, - MetadataCacheSizeBytes: maxMetadataCacheMB << 20, - MaxListCacheDuration: content.DurationSeconds(time.Duration(maxCacheDurationSecond) * time.Second), + // softLimit 80% + ContentCacheSizeBytes: (dataCacheLimit / 5 * 4) << 10, + MetadataCacheSizeBytes: (metadataCacheLimit / 5 * 4) << 10, + // hardLimit 100% + ContentCacheSizeLimitBytes: dataCacheLimit << 10, + MetadataCacheSizeLimitBytes: metadataCacheLimit << 10, + MaxListCacheDuration: content.DurationSeconds(time.Duration(maxCacheDurationSecond) * time.Second), }, ClientOptions: repo.ClientOptions{ Hostname: optionalHaveString(udmrepo.GenOptionOwnerDomain, repoOptions.GeneralOptions), diff --git a/pkg/repository/udmrepo/kopialib/backend/common_test.go b/pkg/repository/udmrepo/kopialib/backend/common_test.go index 8ec90f069..c5c070716 100644 --- a/pkg/repository/udmrepo/kopialib/backend/common_test.go +++ b/pkg/repository/udmrepo/kopialib/backend/common_test.go @@ -111,9 +111,11 @@ func TestSetupNewRepositoryOptions(t *testing.T) { func TestSetupConnectOptions(t *testing.T) { defaultCacheOption := content.CachingOptions{ - ContentCacheSizeBytes: 2000 << 20, - MetadataCacheSizeBytes: 2000 << 20, - MaxListCacheDuration: content.DurationSeconds(time.Duration(30) * time.Second), + ContentCacheSizeBytes: 3200 << 20, + MetadataCacheSizeBytes: 800 << 20, + ContentCacheSizeLimitBytes: 4000 << 20, + MetadataCacheSizeLimitBytes: 1000 << 20, + MaxListCacheDuration: content.DurationSeconds(time.Duration(30) * time.Second), } testCases := []struct { diff --git a/pkg/repository/udmrepo/kopialib/backend/utils.go b/pkg/repository/udmrepo/kopialib/backend/utils.go index a740a0b7b..62ba4c322 100644 --- a/pkg/repository/udmrepo/kopialib/backend/utils.go +++ b/pkg/repository/udmrepo/kopialib/backend/utils.go @@ -98,6 +98,21 @@ 
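A quick way to sanity-check the cache arithmetic that `SetupConnectOptions` gains above (alongside the widened `getStorageVariables` mock signature in the tests, which threads the new per-repository configuration map through) is to run it standalone. The following minimal Go sketch is not part of the patch; it only mirrors the computation: the configured limit in MB is converted to bytes, split 80/20 between data and metadata caches with KB alignment, and each cache's soft limit is set to 80% of its hard limit. With the 5000 MB default it yields exactly the values asserted in `common_test.go`.

```go
package main

import "fmt"

const defaultCacheLimitMB = 5000

// cacheLimits mirrors the arithmetic in SetupConnectOptions: the limit in MB
// becomes bytes (<<20), is split 80/20 between data and metadata caches with
// KB alignment (>>10), and each cache's soft limit is 80% of its hard limit.
func cacheLimits(limitMB int64) (dataSoft, dataHard, metaSoft, metaHard int64) {
	cacheLimit := limitMB << 20

	dataCacheLimitKB := (cacheLimit / 5 * 4) >> 10 // 80% for data, in KB
	metaCacheLimitKB := (cacheLimit / 5) >> 10     // 20% for metadata, in KB

	dataHard = dataCacheLimitKB << 10
	metaHard = metaCacheLimitKB << 10
	dataSoft = (dataCacheLimitKB / 5 * 4) << 10 // softLimit = 80% of hardLimit
	metaSoft = (metaCacheLimitKB / 5 * 4) << 10
	return
}

func main() {
	ds, dh, ms, mh := cacheLimits(defaultCacheLimitMB)
	// Prints 3200 4000 800 1000 (MB) -- the values in common_test.go.
	fmt.Println(ds>>20, dh>>20, ms>>20, mh>>20)
}
```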
func optionalHaveBase64(ctx context.Context, key string, flags map[string]string return nil } +func optionalHaveIntWithDefault(ctx context.Context, key string, flags map[string]string, defValue int64) int64 { + if value, exist := flags[key]; exist { + if value != "" { + ret, err := strconv.ParseInt(value, 10, 64) + if err == nil { + return ret + } + + backendLog()(ctx).Errorf("Ignore %s, value [%s] is invalid, err %v", key, value, err) + } + } + + return defValue +} + func backendLog() func(ctx context.Context) logging.Logger { return logging.Module("kopialib-bd") } diff --git a/pkg/repository/udmrepo/kopialib/backend/utils_test.go b/pkg/repository/udmrepo/kopialib/backend/utils_test.go index 0eb238196..6f9049f41 100644 --- a/pkg/repository/udmrepo/kopialib/backend/utils_test.go +++ b/pkg/repository/udmrepo/kopialib/backend/utils_test.go @@ -90,3 +90,68 @@ func TestOptionalHaveBool(t *testing.T) { }) } } + +func TestOptionalHaveIntWithDefault(t *testing.T) { + var expectMsg string + testCases := []struct { + name string + key string + flags map[string]string + defaultValue int64 + logger *storagemocks.Core + retFuncCheck func(mock.Arguments) + expectMsg string + retValue int64 + }{ + { + name: "key not exist", + key: "fake-key", + flags: map[string]string{}, + defaultValue: 2000, + retValue: 2000, + }, + { + name: "value valid", + key: "fake-key", + flags: map[string]string{ + "fake-key": "1000", + }, + retValue: 1000, + }, + { + name: "value invalid", + key: "fake-key", + flags: map[string]string{ + "fake-key": "fake-value", + }, + logger: new(storagemocks.Core), + retFuncCheck: func(args mock.Arguments) { + ent := args[0].(zapcore.Entry) + if ent.Level == zapcore.ErrorLevel { + expectMsg = ent.Message + } + }, + expectMsg: "Ignore fake-key, value [fake-value] is invalid, err strconv.ParseInt: parsing \"fake-value\": invalid syntax", + defaultValue: 2000, + retValue: 2000, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if tc.logger != nil { + tc.logger.On("Enabled", mock.Anything).Return(true) + tc.logger.On("Check", mock.Anything, mock.Anything).Run(tc.retFuncCheck).Return(&zapcore.CheckedEntry{}) + } + + ctx := logging.WithLogger(context.Background(), func(module string) logging.Logger { + return zap.New(tc.logger).Sugar() + }) + + retValue := optionalHaveIntWithDefault(ctx, tc.key, tc.flags, tc.defaultValue) + + require.Equal(t, retValue, tc.retValue) + require.Equal(t, tc.expectMsg, expectMsg) + }) + } +} diff --git a/pkg/repository/udmrepo/repo_options.go b/pkg/repository/udmrepo/repo_options.go index af54e0947..28eadfdb9 100644 --- a/pkg/repository/udmrepo/repo_options.go +++ b/pkg/repository/udmrepo/repo_options.go @@ -63,6 +63,8 @@ const ( StoreOptionGenRetentionPeriod = "retentionPeriod" StoreOptionGenReadOnly = "readOnly" + StoreOptionCacheLimit = "cacheLimitMB" + ThrottleOptionReadOps = "readOPS" ThrottleOptionWriteOps = "writeOPS" ThrottleOptionListOps = "listOPS" diff --git a/pkg/uploader/types.go b/pkg/uploader/types.go index 02106e266..fb79f7c9f 100644 --- a/pkg/uploader/types.go +++ b/pkg/uploader/types.go @@ -39,12 +39,17 @@ const ( // ValidateUploaderType validates if the input param is a valid uploader type. // It will return an error if it's invalid. 
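Back in `utils.go`, the new `optionalHaveIntWithDefault` helper is what turns the `cacheLimitMB` flag into a number. Here is a minimal self-contained sketch of the same parse-or-default behavior; the real helper additionally logs invalid values via `backendLog()`, which is what the mock-logger test above exercises.

```go
package main

import (
	"fmt"
	"strconv"
)

// intWithDefault returns the flag value parsed as a base-10 int64, or the
// default when the key is absent, empty, or unparsable.
func intWithDefault(flags map[string]string, key string, def int64) int64 {
	if v, ok := flags[key]; ok && v != "" {
		if n, err := strconv.ParseInt(v, 10, 64); err == nil {
			return n
		}
		// The real helper logs here: "Ignore <key>, value [<v>] is invalid, err ..."
	}
	return def
}

func main() {
	flags := map[string]string{"cacheLimitMB": "1000", "bad": "fake-value"}
	fmt.Println(intWithDefault(flags, "cacheLimitMB", 2000)) // 1000
	fmt.Println(intWithDefault(flags, "bad", 2000))          // 2000 (unparsable)
	fmt.Println(intWithDefault(flags, "missing", 2000))      // 2000 (absent)
}
```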
-func ValidateUploaderType(t string) error { +func ValidateUploaderType(t string) (string, error) { t = strings.TrimSpace(t) if t != ResticType && t != KopiaType { - return fmt.Errorf("invalid uploader type '%s', valid upload types are: '%s', '%s'", t, ResticType, KopiaType) + return "", fmt.Errorf("invalid uploader type '%s', valid upload types are: '%s', '%s'", t, ResticType, KopiaType) } - return nil + + if t == ResticType { + return fmt.Sprintf("Uploader '%s' is deprecated, don't use it for new backups, otherwise the backups won't be available for restore when this functionality is removed in a future version of Velero", t), nil + } + + return "", nil } type SnapshotInfo struct { diff --git a/pkg/uploader/types_test.go b/pkg/uploader/types_test.go index 492051bf2..e92f20c79 100644 --- a/pkg/uploader/types_test.go +++ b/pkg/uploader/types_test.go @@ -1,34 +1,47 @@ package uploader -import "testing" +import ( + "testing" + + "github.com/stretchr/testify/assert" +) func TestValidateUploaderType(t *testing.T) { tests := []struct { name string input string - wantErr bool + wantErr string + wantMsg string }{ { "'restic' is a valid type", "restic", - false, + "", + "Uploader 'restic' is deprecated, don't use it for new backups, otherwise the backups won't be available for restore when this functionality is removed in a future version of Velero", }, { "' kopia ' is a valid type (space will be trimmed)", " kopia ", - false, + "", + "", }, { "'anything_else' is invalid", "anything_else", - true, + "invalid uploader type 'anything_else', valid upload types are: 'restic', 'kopia'", + "", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if err := ValidateUploaderType(tt.input); (err != nil) != tt.wantErr { - t.Errorf("ValidateUploaderType(), input = '%s' error = %v, wantErr %v", tt.input, err, tt.wantErr) + msg, err := ValidateUploaderType(tt.input) + if tt.wantErr != "" { + assert.EqualError(t, err, tt.wantErr) + } else { + assert.NoError(t, err) } + + assert.Equal(t, tt.wantMsg, msg) }) } } diff --git a/site/content/docs/main/file-system-backup.md b/site/content/docs/main/file-system-backup.md index 1e4917eb4..5dac5e3c5 100644 --- a/site/content/docs/main/file-system-backup.md +++ b/site/content/docs/main/file-system-backup.md @@ -28,6 +28,7 @@ Cons: - It access the file system from the mounted hostpath directory, so Velero Node Agent pods need to run as root user and even under privileged mode in some environments. **NOTE:** hostPath volumes are not supported, but the [local volume type][5] is supported. +**NOTE:** restic is going through the deprecation process following the [Velero Deprecation Policy][17]; for more details, see the Restic Deprecation section. ## Setup File System Backup @@ -643,6 +644,39 @@ If you want to constraint the CPU/memory usage, you need to [customize the resou During the restore, the repository may also cache data/metadata so as to reduce the network footprint and speed up the restore. The repository uses its own policy to store and clean up the cache. For Kopia repository, the cache is stored in the node-agent pod's root file system and the cleanup is triggered for the data/metadata that are older than 10 minutes (not configurable at present). So you should prepare enough disk space, otherwise, the node-agent pod may be evicted due to running out of the ephemeral storage.
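The deprecation warnings documented below all originate from the new `ValidateUploaderType` return value shown earlier in this change set: a non-nil error still means the type is invalid, while a non-empty message is a warning the caller must surface. A hedged usage sketch against this package follows; the surrounding CLI wiring is illustrative only, not taken from the patch.

```go
package main

import (
	"fmt"
	"os"

	"github.com/vmware-tanzu/velero/pkg/uploader"
)

func main() {
	// Invalid type -> hard error; restic -> deprecation warning message.
	msg, err := uploader.ValidateUploaderType(" restic ")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if msg != "" {
		fmt.Println("⚠️ ", msg)
	}
}
```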
+## Restic Deprecation + +According to the [Velero Deprecation Policy][17], the restic path is being deprecated starting from v1.15. Specifically: +- For 1.15 and 1.16, if the restic path is used by a backup, the backup is still created and succeeds, but you will see warnings +- For 1.17 and 1.18, backups with the restic path are disabled, but you are still allowed to restore from your previous restic backups +- From 1.19, both backups and restores with the restic path will be disabled; you will not be able to use 1.19 or higher to restore your restic backup data + +For 1.15 and 1.16, you will see the below warnings if `--uploader-type=restic` is used during Velero installation: +In the output of installation: +``` +⚠️ Uploader 'restic' is deprecated, don't use it for new backups, otherwise the backups won't be available for restore when this functionality is removed in a future version of Velero +``` +In the Velero server log: +``` +level=warning msg="Uploader 'restic' is deprecated, don't use it for new backups, otherwise the backups won't be available for restore when this functionality is removed in a future version of Velero" +``` +In the output of the `velero backup describe` command for a backup with fs-backup: +``` + Namespaces: + <namespace>: resource: /pods name: <pod name> message: /Uploader 'restic' is deprecated, don't use it for new backups, otherwise the backups won't be available for restore when this functionality is removed in a future version of Velero +``` + +And you will see the below warnings when you upgrade from v1.9 or lower to 1.15 or 1.16: +In the Velero server log: +``` +level=warning msg="Uploader 'restic' is deprecated, don't use it for new backups, otherwise the backups won't be available for restore when this functionality is removed in a future version of Velero" +``` +In the output of the `velero backup describe` command for a backup with fs-backup: +``` + Namespaces: + <namespace>: resource: /pods name: <pod name> message: /Uploader 'restic' is deprecated, don't use it for new backups, otherwise the backups won't be available for restore when this functionality is removed in a future version of Velero +``` + [1]: https://github.com/restic/restic [2]: https://github.com/kopia/kopia @@ -660,3 +694,4 @@ For Kopia repository, the cache is stored in the node-agent pod's root file syst [14]: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/ [15]: customize-installation.md#customize-resource-requests-and-limits [16]: performance-guidance.md +[17]: https://github.com/vmware-tanzu/velero/blob/main/GOVERNANCE.md#deprecation-policy diff --git a/site/content/docs/main/restore-reference.md b/site/content/docs/main/restore-reference.md index a7ec86e07..012258ff3 100644 --- a/site/content/docs/main/restore-reference.md +++ b/site/content/docs/main/restore-reference.md @@ -275,6 +275,12 @@ You can also configure the existing resource policy in a [Restore](api-types/res * Update of a resource only applies to the Kubernetes resource data such as its spec. It may not work as expected for certain resource types such as PVCs and Pods. In case of PVCs for example, data in the PV is not restored or overwritten in any way. * `update` existing resource policy works in a best-effort way, which means when restore's `--existing-resource-policy` is set to `update`, Velero will try to update the resource if the resource already exists, if the update fails, Velero will fall back to the default non-destructive way in the restore, and just logs a warning without failing the restore.
+## Restore "status" field of objects + +By default, Velero will remove the `status` field of an object before it's restored. This is because the value of the `status` field is typically set by the controller during reconciliation. However, some custom resources are designed to store environment-specific information in the `status` field, and it is important to preserve such information during restore. + +You can use the `--status-include-resources` and `--status-exclude-resources` flags to select the resources whose `status` field will be restored by Velero. If there are resources selected via these flags, Velero will trigger another API call to update the restored object's `status` field after the object is created. + ## Write Sparse files If using fs-restore or CSI snapshot data movements, it's supported to write sparse files during restore by the below command: ```bash
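# (Editor's sketch, not part of the patch; kept in comment form because the
# surrounding doc hunk opens a bash block here. The "Restore status field"
# flags described above are passed at restore creation; the backup and
# resource names below are illustrative placeholders only:)
#
#   velero restore create --from-backup my-backup \
#     --status-include-resources mycrds.example.com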