diff --git a/changelogs/unreleased/6914-shubham-pampattiwar b/changelogs/unreleased/6914-shubham-pampattiwar new file mode 100644 index 000000000..c95688b39 --- /dev/null +++ b/changelogs/unreleased/6914-shubham-pampattiwar @@ -0,0 +1 @@ +Fix issue 6913: Velero Built-in Datamover: Backup stucks in phase WaitingForPluginOperations when Node Agent pod gets restarted \ No newline at end of file diff --git a/changelogs/unreleased/6917-27149chen b/changelogs/unreleased/6917-27149chen new file mode 100644 index 000000000..94648eaa4 --- /dev/null +++ b/changelogs/unreleased/6917-27149chen @@ -0,0 +1 @@ +Support JSON Merge Patch and Strategic Merge Patch in Resource Modifiers \ No newline at end of file diff --git a/changelogs/unreleased/6923-reasonerjt b/changelogs/unreleased/6923-reasonerjt new file mode 100644 index 000000000..2a9c2aabd --- /dev/null +++ b/changelogs/unreleased/6923-reasonerjt @@ -0,0 +1 @@ +Bump up aws sdk to aws-sdk-go-v2 diff --git a/changelogs/unreleased/6950-Lyndon-Li b/changelogs/unreleased/6950-Lyndon-Li new file mode 100644 index 000000000..a1d0b5983 --- /dev/null +++ b/changelogs/unreleased/6950-Lyndon-Li @@ -0,0 +1 @@ +Add the design for node-agent concurrency \ No newline at end of file diff --git a/changelogs/unreleased/6962-blackpiglet b/changelogs/unreleased/6962-blackpiglet new file mode 100644 index 000000000..1159efe9b --- /dev/null +++ b/changelogs/unreleased/6962-blackpiglet @@ -0,0 +1 @@ +Add the PV backup information design document. \ No newline at end of file diff --git a/changelogs/unreleased/6968-blackpiglet b/changelogs/unreleased/6968-blackpiglet new file mode 100644 index 000000000..9a3461f24 --- /dev/null +++ b/changelogs/unreleased/6968-blackpiglet @@ -0,0 +1 @@ +Check whether the action is a CSI action and whether CSI feature is enabled, before executing the action. \ No newline at end of file diff --git a/changelogs/unreleased/6976-Lyndon-Li b/changelogs/unreleased/6976-Lyndon-Li new file mode 100644 index 000000000..06cf694ad --- /dev/null +++ b/changelogs/unreleased/6976-Lyndon-Li @@ -0,0 +1 @@ +It is a valid case that the Status.RestoreSize field in VolumeSnapshot is not set, if so, get the volume size from the source PVC to create the backup PVC \ No newline at end of file diff --git a/changelogs/unreleased/6989-blackpiglet b/changelogs/unreleased/6989-blackpiglet new file mode 100644 index 000000000..5dfb8f1cf --- /dev/null +++ b/changelogs/unreleased/6989-blackpiglet @@ -0,0 +1 @@ +Limit PVC block mode logic to non-Windows platform. 
\ No newline at end of file diff --git a/changelogs/unreleased/6990-Lyndon-Li b/changelogs/unreleased/6990-Lyndon-Li new file mode 100644 index 000000000..6f79e1c9d --- /dev/null +++ b/changelogs/unreleased/6990-Lyndon-Li @@ -0,0 +1 @@ +Fix #6988, always get region from BSL if it is not empty \ No newline at end of file diff --git a/changelogs/unreleased/6995-kaovilai b/changelogs/unreleased/6995-kaovilai new file mode 100644 index 000000000..ef3ab9406 --- /dev/null +++ b/changelogs/unreleased/6995-kaovilai @@ -0,0 +1 @@ +Fix unified repository (kopia) s3 credentials profile selection \ No newline at end of file diff --git a/changelogs/unreleased/7001-Lyndon-Li b/changelogs/unreleased/7001-Lyndon-Li new file mode 100644 index 000000000..c4f7c3bed --- /dev/null +++ b/changelogs/unreleased/7001-Lyndon-Li @@ -0,0 +1 @@ +Bump kopia to 0.15.0 \ No newline at end of file diff --git a/changelogs/unreleased/7004-kaovilai b/changelogs/unreleased/7004-kaovilai new file mode 100644 index 000000000..3b85df196 --- /dev/null +++ b/changelogs/unreleased/7004-kaovilai @@ -0,0 +1 @@ +restore: Use warning when Create IsAlreadyExist and Get error \ No newline at end of file diff --git a/changelogs/unreleased/7011-Lyndon-Li b/changelogs/unreleased/7011-Lyndon-Li new file mode 100644 index 000000000..81c69ad5a --- /dev/null +++ b/changelogs/unreleased/7011-Lyndon-Li @@ -0,0 +1 @@ +Fix #6964. Don't use csiSnapshotTimeout (10 min) for waiting snapshot to readyToUse for data mover, so as to make the behavior complied with CSI snapshot backup \ No newline at end of file diff --git a/changelogs/unreleased/7022-allenxu404 b/changelogs/unreleased/7022-allenxu404 new file mode 100644 index 000000000..139a5e842 --- /dev/null +++ b/changelogs/unreleased/7022-allenxu404 @@ -0,0 +1 @@ +Fix inconsistent behavior of Backup and Restore hook execution \ No newline at end of file diff --git a/changelogs/unreleased/7026-blackpiglet b/changelogs/unreleased/7026-blackpiglet new file mode 100644 index 000000000..0decfabf7 --- /dev/null +++ b/changelogs/unreleased/7026-blackpiglet @@ -0,0 +1 @@ +Add HealthCheckNodePort deletion logic for Service restore. \ No newline at end of file diff --git a/changelogs/unreleased/7028-shubham-pampattiwar b/changelogs/unreleased/7028-shubham-pampattiwar new file mode 100644 index 000000000..4ed72fa6d --- /dev/null +++ b/changelogs/unreleased/7028-shubham-pampattiwar @@ -0,0 +1 @@ +Add description markers for dataupload and datadownload CRDs \ No newline at end of file diff --git a/changelogs/unreleased/7032-deefdragon b/changelogs/unreleased/7032-deefdragon new file mode 100644 index 000000000..710ed3a11 --- /dev/null +++ b/changelogs/unreleased/7032-deefdragon @@ -0,0 +1 @@ +Fix #6857. Added check for matching Owner References when synchronizing backups, removing references that are not found/have mismatched uid. 
\ No newline at end of file diff --git a/changelogs/unreleased/7034-ywk253100 b/changelogs/unreleased/7034-ywk253100 new file mode 100644 index 000000000..b4ce2bbab --- /dev/null +++ b/changelogs/unreleased/7034-ywk253100 @@ -0,0 +1 @@ +Read information from the credential specified by BSL \ No newline at end of file diff --git a/changelogs/unreleased/7038-Lyndon-Li b/changelogs/unreleased/7038-Lyndon-Li new file mode 100644 index 000000000..f2a3a9ae2 --- /dev/null +++ b/changelogs/unreleased/7038-Lyndon-Li @@ -0,0 +1 @@ +Fix issue #7027, data mover backup exposer should not assume the first volume as the backup volume in backup pod \ No newline at end of file diff --git a/changelogs/unreleased/7041-blackpiglet b/changelogs/unreleased/7041-blackpiglet new file mode 100644 index 000000000..b347ce752 --- /dev/null +++ b/changelogs/unreleased/7041-blackpiglet @@ -0,0 +1 @@ +Remove the Velero generated client. \ No newline at end of file diff --git a/changelogs/unreleased/7046-kaovilai b/changelogs/unreleased/7046-kaovilai new file mode 100644 index 000000000..abf627039 --- /dev/null +++ b/changelogs/unreleased/7046-kaovilai @@ -0,0 +1 @@ +Update Backup.Status.CSIVolumeSnapshotsCompleted during finalize \ No newline at end of file diff --git a/changelogs/unreleased/7051-blackpiglet b/changelogs/unreleased/7051-blackpiglet new file mode 100644 index 000000000..5b930f77f --- /dev/null +++ b/changelogs/unreleased/7051-blackpiglet @@ -0,0 +1 @@ +Remove dependency of generated client part 3. \ No newline at end of file diff --git a/changelogs/unreleased/7059-Lyndon-Li b/changelogs/unreleased/7059-Lyndon-Li new file mode 100644 index 000000000..77b3a1765 --- /dev/null +++ b/changelogs/unreleased/7059-Lyndon-Li @@ -0,0 +1 @@ +Add the implementation for design #6950, configurable data path concurrency \ No newline at end of file diff --git a/changelogs/unreleased/7061-blackpiglet b/changelogs/unreleased/7061-blackpiglet new file mode 100644 index 000000000..ac965ed13 --- /dev/null +++ b/changelogs/unreleased/7061-blackpiglet @@ -0,0 +1 @@ +Add DataUpload Result and CSI VolumeSnapshot check for restore PV. \ No newline at end of file diff --git a/changelogs/unreleased/7070-blackpiglet b/changelogs/unreleased/7070-blackpiglet new file mode 100644 index 000000000..75843b730 --- /dev/null +++ b/changelogs/unreleased/7070-blackpiglet @@ -0,0 +1 @@ +Add VolumeInfo metadata structures. \ No newline at end of file diff --git a/changelogs/unreleased/7072-ywk253100 b/changelogs/unreleased/7072-ywk253100 new file mode 100644 index 000000000..2a6faffe3 --- /dev/null +++ b/changelogs/unreleased/7072-ywk253100 @@ -0,0 +1 @@ +Truncate the credential file to avoid the change of secret content messing it up \ No newline at end of file diff --git a/changelogs/unreleased/7077-Lyndon-Li b/changelogs/unreleased/7077-Lyndon-Li new file mode 100644 index 000000000..802609edf --- /dev/null +++ b/changelogs/unreleased/7077-Lyndon-Li @@ -0,0 +1 @@ +Fix issue #6693, partially fail restore if CSI snapshot is involved but CSI feature is not ready, i.e., CSI feature gate is not enabled or CSI plugin is not installed. 
\ No newline at end of file diff --git a/changelogs/unreleased/7081-ywk253100 b/changelogs/unreleased/7081-ywk253100 new file mode 100644 index 000000000..bc142a316 --- /dev/null +++ b/changelogs/unreleased/7081-ywk253100 @@ -0,0 +1 @@ +Skip syncing the backup which doesn't contain backup metadata \ No newline at end of file diff --git a/changelogs/unreleased/7095-Lyndon-Li b/changelogs/unreleased/7095-Lyndon-Li new file mode 100644 index 000000000..e3a11801d --- /dev/null +++ b/changelogs/unreleased/7095-Lyndon-Li @@ -0,0 +1 @@ +Fix issue #7068, due to an behavior of CSI external snapshotter, manipulations of VS and VSC may not be handled in the same order inside external snapshotter as the API is called. So add a protection finalizer to ensure the order \ No newline at end of file diff --git a/changelogs/unreleased/7096-Lyndon-Li b/changelogs/unreleased/7096-Lyndon-Li new file mode 100644 index 000000000..7ce331248 --- /dev/null +++ b/changelogs/unreleased/7096-Lyndon-Li @@ -0,0 +1 @@ +Fix issue #7094, fallback to full backup if previous snapshot is not found \ No newline at end of file diff --git a/changelogs/unreleased/7102-Lyndon-Li b/changelogs/unreleased/7102-Lyndon-Li new file mode 100644 index 000000000..4b5b81ffd --- /dev/null +++ b/changelogs/unreleased/7102-Lyndon-Li @@ -0,0 +1 @@ +Fix issue #7068, due to a behavior of CSI external snapshotter, manipulations of VS and VSC may not be handled in the same order inside external snapshotter as the API is called. So add a protection finalizer to ensure the order \ No newline at end of file diff --git a/changelogs/unreleased/7115-reasonerjt b/changelogs/unreleased/7115-reasonerjt new file mode 100644 index 000000000..5824427b0 --- /dev/null +++ b/changelogs/unreleased/7115-reasonerjt @@ -0,0 +1 @@ +Include plugin name in the error message by operations \ No newline at end of file diff --git a/config/crd/v2alpha1/bases/velero.io_datadownloads.yaml b/config/crd/v2alpha1/bases/velero.io_datadownloads.yaml index 8389028f7..c81eafc4f 100644 --- a/config/crd/v2alpha1/bases/velero.io_datadownloads.yaml +++ b/config/crd/v2alpha1/bases/velero.io_datadownloads.yaml @@ -48,6 +48,8 @@ spec: name: v2alpha1 schema: openAPIV3Schema: + description: DataDownload acts as the protocol between data mover plugins + and data mover controller for the datamover restore operation properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation diff --git a/config/crd/v2alpha1/bases/velero.io_datauploads.yaml b/config/crd/v2alpha1/bases/velero.io_datauploads.yaml index 28296365b..324a98181 100644 --- a/config/crd/v2alpha1/bases/velero.io_datauploads.yaml +++ b/config/crd/v2alpha1/bases/velero.io_datauploads.yaml @@ -49,6 +49,8 @@ spec: name: v2alpha1 schema: openAPIV3Schema: + description: DataUpload acts as the protocol between data mover plugins and + data mover controller for the datamover backup operation properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation diff --git a/config/crd/v2alpha1/crds/crds.go b/config/crd/v2alpha1/crds/crds.go index 7d592d9b6..f68d2ce64 100644 --- a/config/crd/v2alpha1/crds/crds.go +++ b/config/crd/v2alpha1/crds/crds.go @@ -29,8 +29,8 @@ import ( ) var rawCRDs = [][]byte{ - []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xbcY_o\xe4\xb8\r\x7fϧ 
\xb6\x0fyY;\xb7ע(\xe6mw\xd2\x02Ao\xd3\xc1e\x91w٢=\xbaȒ*ɓ\xa6E\xbf{A\xc9\xf2\xf8\x8f&N\xf6z緑(\xf2'\x92\xfa\x91\xd2\x14EqŌxD\xeb\x84V;`F\xe0\xbf<*\xfa\xe5ʧ\xbf\xb8R\xe8\x9bӧ\xab'\xa1\xf8\x0e\xf6\xbd\xf3\xba\xfb\x19\x9d\xeem\x8d\xb7\xd8\b%\xbc\xd0\xea\xaaC\xcf8\xf3lw\x05\xc0\x94ҞѰ\xa3\x9f\x00\xb5V\xdej)\xd1\x16-\xaa\U000a9bf0\xea\x85\xe4h\x83\xf2d\xfa\xf4C\xf9\xe9\xc7\xf2\x87+\x00\xc5:\xdc\x01\xe9\xe3\xfaYI\u0378+O(\xd1\xeaR\xe8+g\xb0&ŭս\xd9\xc1y\".\x1c\x8cF\xc0\xb7̳\xdbAG\x18\x96\xc2\xf9\xbf\xaf\xa6~\x12·i#{\xcb\xe4\xc2v\x98qB\xb5\xbddv>w\x05\xe0jmp\a\xf7dڰ\x1ail\xd8S\x80R\x00\xe3\xdd\x00S\xa0\xab_\xb0\xf6%<\xa0%5\xe0\x8e\xba\x97\x9ch\xff\x84փ\xc5Z\xb7J\xfc{\xd4\xed\xc0\xeb`T2\x8f\x03\xf1\x9e\xbf\xc0\x90\x8aI81\xd9\xe3G`\x8aC\xc7^\xc0\"Y\x81^M\xf4\x05\x11W\xc2Wm\x11\x84j\xf4\x0e\x8e\xde\x1b\xb7\xbb\xb9i\x85O\xe5\xab\xd6]\xd7+\xe1_nB%\x12U\xef\xb5u7\x1cO(o\x9ch\vf\xeb\xa3\xf0X\xfb\xde\xe2\r3\xa2\b\xd0U(ae\xc7\xff`\x87\x82\xe7\xaegXW\x11\x8d_\xa8<\xafD\x80\xca\x0f\xa5\x13\x1b\x96\xc6]\x9c\x1dMC䝟\xff\xfa\xf0\r\x92\xe9\x10\x8c\xa5\xf7\x83\xdf\xcf\v\xdd9\x04\xe40\xa1\x1a\xb41\x88\x8d\xd5]Љ\x8a\x1b-\x94\x0f?j)P-\xdd\xef\xfa\xaa\x13\x9e\xe2\xfe\xcf\x1e\x9d\xa7X\x95\xb0\x0f5\x1d*\x84\xde\xd0A\xe2%\xdc)س\x0e\xe5\x9e9\xfc\xcd\x03@\x9ev\x059\xf6m!\x98\xb6#K\xe1\xe8\xb5\xc9D\xea'.\xc4k\xca\x02\x0f\x06k\n\x1dy\x8f\x96\x89F\f<\xdbh\vl&[\xceT\xe6\x8f,}Y\xae]\n-0}ɭI\xc0Ԅ\xd1\x06\xd2wQr\xa5\x14@^,\x14\x16\x8dv\xc2k\xfbr.\x17\xe5JÅ\x00\xd0W3U\xa3\xdc\xd8\xc9>\b\x81P\x9c<\x89c\xde\x11ED\x05\x01\x93V\xad\xa6sq\xd9\xc1\xf1\xbb\xf3\xb4\x8a\x12ա\xa7=\xa9,\x93\v\x05\xe7>\n\xa6\xfd\xd2rg\x95\xd6\x12ْ\xf7(\xb7\xbe\xea\x13ur\xaa\x11\xedz\x8fӖ\xefR\xe07ܗIÉI\xda\x05\xe5\x1c!):\x1a/RB\x12\xf16\xa2\x1d\x8al\xc6h#Prw)\x96\xab\xf3\x916\x1c\xacl\x84sD\x99\x8e\xc7P^B\xd7\x11\x14P`\x89G\\h\xe7h2\x830\xa6`\tw\xcdD\xa3p\xf0\xe1\x03h\v\x1fb\xcb\xff\xe1cL\xd7^H_\b5\xb1\x91\xd1\xf8,\xa4Lvߕ\xc5\x14\xbd\xb1\xcdн\xdfp\xc0?\x16\xe2\v?x\xea\x7f\xc2\u07bd\x86g&\xfcX\xee2\x98G\xd3\xee#T\xd8\x10\xc5Z\xf4\xbdUt\x12\xd0Z\xa2\x1c\x17T\xea\u07bfkSN1\xe3\x8e\xda\xdf\xddnl\xe7a\x14L\xecrw\x9b\xb8\xe51Da\xa4\x98A\x12\xbc\xce\x05\x94\xa0G\x0e\t\xc5\xe8}hC\x05\x1c/X[\x90\xe7\xd2\t\xb7\xb6\xa2\x15\xd4V\xa8q\xe6Ly'\xba\x90\xe5\x12Q\xb8\xb0?\xe4Л\b\x9c(\x86\xaak\x85\xc0EӠE\xe5c}\x8d\x86\x0f\x8f\xfbkw6\x92\xd3\xd9L0\x84\x0e\xabc\xc6 
\xa7\xbe\x98\";8\xea].\xf2̶\xe8\x1f\xc366\xfc\xf3m\"\x9a\x9cC\x95\x9b.1T\b\x86\xe8F\x8dpx\xdcS\a\x96\xd9\xc6\xe1q\x8d\xf0r\x95\x83\xa1\xe1\xbd\x10\xc1\x15\xcaU\xfc\x06<\xaf9v\x83N\x01\xcc\xe9\r\x96\x0f\x8f\xb9B:\xba\x03\xfc\x91y\x92\x18.(P\xbdduB:\x1fC8\xbf\x0fo\xfd&\xc0\xfbW\x11\uf5d0/\xe0\xad^~5d*\xde\xc2\"_\xa3.^\x89\\\x01\xe6\x94\x1d\xac\xdf^\xa2\xf2\x96\x8b|w\xb5\x90YR\xfcb\xfaL\x96ˉ9\xd3,f\xa7G\xf2Mmh\xb8B\xbe\xb5\x11\x8d\x0fCC\xd8\xeb\xde\x06\x1a\x1a\x9e\x8b\xe8V\xf6]\xadh\x1d\x1fZ\xa6w\xea\xad\xf6m\xbd\"\xdc\xf7,\x9f\xd4;\x96\x12*^\xec\xd3kN\xae\x7f;\xeb\x8bK\x03=\x92:\xe4\x80'T@\xad6\x13\x12y\xd2\xe9J\xf8F\xddx\xb8\xf8\\/\xafH\xc1߃\xa2Pv\xa9gʀ^\xafK\xaf:t\xdd)H\xc5JB\xf5R\xb2J\xe2\x0e\xbc\xed/\xf5\x8fكҡs\xac\xdd\"\xea\xafQ*^\x15\x87%\xc0*j*\x96=\xed\xb5\x1bb\xff\xae\xa2\xa14\xdf\xc2p\xafy\x00\xa0\xbe\xe3\x95\xe4]XB\x0f\xbe\x01\xe6@2\xb9\x9c\x1f\xa1\xbd~=@\xd5w9f\xba\xc7\xe7\xcc\xe8\xe7\xbaF\x93c\xcb\x02\x0e\x16\r\xb3٩\xd5\xf3\xect2^vrę\xe6\xb2:\xc7\xf7\xcf\xcc\xdc\xdf\xc2ax\x97\xa7\a|[\xceN\xb7\xa3\xa3\x96\xe90\x87'J\xd5w\x15Z\xf2xx\x04M\xaeO,\x999\x80L\xf1Y\xc8\xce\x1aƞ0\xa8\xa2\x93LU*^\xe0R\x97̅3\x92\xe5\x8al\xdaɬ}9\x1f\x90D:#\xbd\xbf\xb7_\x19\x9f\x8c\xf3E8\xf7\ue6cb\xc2\xf4\x05w1?>\x05\xff6\x16^\xb9\xd0͟\xe6\xb7Z\xea\x99\xf0\x16\xc1\x0f\xff\n\xe4\xe8}\xca\xd4k^\x9e\x9b\xf9=)9\xeb\xa8\xd5`@\xce'\xba\x87g\x95\xe9H_\x8d\x8f\x85;\xf8\xcf\x7f\xaf\xfe\x17\x00\x00\xff\xff\x9f\xc23\x7f`\x1b\x00\x00"), - []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xbcYK\x93۸\x11\xbeϯ\xe8r\x0e\xbe\x8c8\xebM*\x95\xd2\xcd\xd6d\xabTYOT\x96w\xee\x10\xd9$\xb1\x03\x02\f\x1e\x9aLR\xf9\xef\xa9\x06\b\x8a\x0fH\x94\x1c;\xb1\xfcŵ\xb0\xb7J\xb3\n\xe1W\x95\a\xe7\xbd֨;\xe7\x1d\xc2\x12S+'\n8D\x8d\x01\x8cU:\xe9\xc5\x16\xf3,Pu|#ۉ+\xc7{~\xe7C\x96kd\xc9C\x16Q&\xf3+\xb8\x92\xe9\x93\xf6\xb1«N\xd9КR\x15؛\x0e\x87\x12q\x03\xadV9\x1as\xe1\xdc\x13\xf9H\x86\xa7\xd3\xc0\xcc,a\xc5\xf1g&ښ}\b(\x93\xd7ذuG\xa1Z\x94\x1fw\xdb\xe7?\xeeG\xc3@\x82\xb4\xa8-\x8f0\x17\xbe\x01~\x0fFa\xac\xec{b\x18VAA\xc0\x8d\xc6kځ\x16\x16\x9d\f\xc1 ܀\xc6V\xa3Ai\x87.\x8e\x9f*\x81IP\x87\xdf1\xb7\x19\xecQ\x13\x9bx\xd0r%\x8f\xa8-h\xccU%\xf9\xbfz\xde\x06\xac\xf2\x9b\nf\xb1\xc3\xdd\xd3\xe7AR2\x01G&\x1c\xde\x03\x93\x054\xec\r4\xd2.\xe0䀟_b2\xf8\xac4\x02\x97\xa5ZCmmk\xd6\x0f\x0f\x15\xb71o\xe5\xaai\x9c\xe4\xf6\xed\xc1\xa7 ~pVi\xf3P\xe0\x11Ń\xe1Պ\xe9\xbc\xe6\x16s\xeb4>\xb0\x96\xaf\xbc\xe8\xd2箬)\xfe\xa0\xbbLgޏd\x9dy4|>\xe9\\\xf0\x00\xe5\x1e:N\xac#\rZ\x9c\fMCd\x9d/\x7f\xdd\x7f\x85\xb8\xb5w\xc6\xd4\xfa\xde\xee'Bsr\x01\x19\x8c\xcb\x12upb\xa9U\xe3y\xa2,Zť\xf5?r\xc1QN\xcdoܡ\xe1\x96\xfc\xfe\x0f\x87ƒ\xaf2\xd8\xf8d\x0e\a\x04\xd7R\x18\x15\x19l%lX\x83b\xc3\f\xfep\a\x90\xa5͊\f{\x9d\v\x86u\xc8tq\xb0\xda`\"\x96\x12g\xfcu\u0080}\x8b99\x8elGD\xbc\xe4\x1dؖJ\x03\x1b\xac\xccF\xec\xd2\xe1J_\x12c\xa7\x8b&\xf2|J\xd1D\xb1\xe4\x00\xcb\"쇕3\xa6\x00b\x9a+z\x1a\x8d\xad2\xdc*\xfdF\x8cC\x9a\xc8f\x1c\xce\x18\x9f\xbe\x9c\xc9\x1cł&\x1b\xbf\b\xb8,Ȏ؟9\x82\x87\xc0\xc0ˤd\xa5(&Ι7|[K4tD\rZ\xd2H&\x10\x9cK8\x15Q0,\x96\xa6Z\x1d\x94\x12Ȧx\x97\x1b\xbe\x97\xac5\xb5\xb2\v\xbamK\x88+\xbf\xbe\xb5H\x9bo\xf6\xdb{\xfa\x13\xc7\xe9\\\x1cy\xd1\x010\x05\x0f\x95\x13s\x90\x85\x00\xb4\xb4h\xb3߂\xe9\xc8\xe7F\x90N\bv\x10\xb8\x06\xab\xdd\\\xb1\xf3ǐ\xbe\xc8v#\x98I.\x98(\xb8\x1f\xaeO\x1d\xbf\xc8\x10r\xbf\xc2\xd6l\n5\xbd\xc5)\xffPU< \xe2}\xfe\x87Wn\xeb$\xe5\x85\xf3\a]u\xc3*\xbcZ\xa1\xc1\xf2\xa4>]\xb5\x13\xd4Q\xe5\x05ev\xcf\x1b\xaf\xef\x92f\x04\xcbߢY`y\xfe$\xcet{\x1e\x11\xa4\xb4\x9bHyN9E\x01F 
\x81\x05\xb8\xf6v\xd9)¹\xc6b.\xf3j\xe4\xaf\xc4\xf4X\xe93a;\x03wo\nf\xd9gu\xa4\xd6K\x96\xbc\x9a\xef=\xec\xd1.\xc5\xc8E\xd5fIc\xb0%Y\x9cr\x04I\xb2jh|\x15\x13\b\x95I%\xaf\xbar8\xb1i\xc9Q\x14\xe6\xe6h_\xb0\x87\x17b\x01\xc3z%b\xb6렊\xe8\xc13\xe8\x0e\x843\xbeE\xa3Ʉ\x02!\xa7d\x04\x89'\x8e\xdc\xc0\xbbw\xa04\xbc\v\xad\xfb\xbb\xfb\x90\x7f\x1c\x17v\xc5\xe5`\x8f\x04\xc7W.D\xdc\xf7\xa6\xb4D\xce\xed;\x06\xe5\x96@\xfc\xef\x93\xe5\x13;Xjd\xbc\xeeV\xc1+㶯]S\b\x1ey\x99{8`I\xf5\x92F봤ԆZS\x05a&\xf9[\v\xaa\xfe\x81,]F\xa5^\xb9R>8W\xd1\x00\x9c\x1e\xbe~\xcc\x0e\x17\x801F\xf2\xf6\xf1\xca\xceh\xfb\x18\xa3\x8e\x17(-\x15\xb1\xba/YOe\xb6\xbc\xd8\xeb\x0e\xeeOo\xeb\fFϦK\x12\x8f\x16/T&݃m\xaa.\xd9S\x88\x13\xb0\xf8G\x82\xcd\xf4I\xed\xbe\x7f\xa1c\xb6{\xa9\xc8k&+\n\bI\xc9\xcd'\xc7\x14\xe3Y\xa91*,\xc6\xe2\xff?k\x8a\xe4q\x99\rzɋ\x01\xef\xeezj8\xe2\x0e\xfd\xcb\xd0\x1a\xfe\xfd\x9f\xbb\xff\x06\x00\x00\xff\xff\xf1\x97\xa7\xf7F!\x00\x00"), + []byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xbcY_s\xe3\xb8\r\x7fϧ\xc0l\x1f\xf2\xb2Rn\xaf\x9dN\xc7o\xbbN;\x93\xe9m\xea\xb9\xec\xe4\x9d\x12a\x99\x17\x8adI\xc8i\xda\xe9w\uf014d\xfd\xa1\xe3d\xafwz3\t\x82?\xfc\x00\x02 ]\x14ŕp\xea\x11}P\xd6l@8\x85\xff\"4\xfc+\x94O\x7f\t\xa5\xb27\xc7OWO\xca\xc8\rl\xbb@\xb6\xfd\x19\x83\xed|\x8d\xb7\xb8WF\x91\xb2\xe6\xaaE\x12R\x90\xd8\\\x01\bc,\t\x1e\x0e\xfc\x13\xa0\xb6\x86\xbc\xd5\x1a}Ѡ)\x9f\xba\n\xabNi\x89>*\x1f\xb6>\xfeP~\xfa\xb1\xfc\xe1\n\xc0\x88\x167\xc0\xfa\xa4}6\xda\n\x19\xca#j\xf4\xb6T\xf6*8\xacYq\xe3m\xe76p\x9aH\v\xfbM\x13\xe0[A\xe2\xb6\xd7\x11\x87\xb5\n\xf4\xf7\xd5\xd4O*P\x9cv\xba\xf3B/\xf6\x8e3A\x99\xa6\xd3\xc2\xcf\xe7\xae\x00Bm\x1dn\xe0\x9e\xb7v\xa2F\x1e\xebm\x8aP\n\x10RF\x96\x84\xdeye\b\xfd\xd6\xea\xae\x1d\xd8)@b\xa8\xbdr\x14Y\x98\u0082@\x82\xba\x00\xa1\xab\x0f \x02\xdc\xe3\xf3͝\xd9y\xdbx\f\t\x16\xc0/\xc1\x9a\x9d\xa0\xc3\x06\xca$^\xba\x83\b\xd8\xcf&*\x1f\xe2D?D/\x8c7\x90W\xa6\xc9!\xf8\xa6Z\x04\xd9\xf9\xe8B\xb6\xbbF\xa0\x83\nsh\xcf\"0\xa0\xc7(Q%\t\x8e^P\xec;볮sX\x97I\xb6W6\xe8Z\xf8o\xbe\xd1\xff=\xb6j\x8f\"\x1b[C\xaa)\xa3\x84\xb2&\x1f`\x9f\x1b|SpMI4Vℱ\x19&\x15\xc0y[c\b\xaf\x04<+\x98\xa1\xb8?\r\xac\xa8I\x12\xc7\x1f\x85v\a\xf1)%\x99\xfa\x80\xad\xd8\xf4+\xacC\xf3yw\xf7\xf8LJ\xd90\xbc\x920DM\x813\x05\xc3wޒ\xad\xad\x86\n\xe9\x19\xd1$\u05f7\xf6\x88\x9e\xf3\\\xa3L\x185r֖S\x81S\xce\xe6\xf8\x8e\xfax6Mz\x8c\xd1\xc3\x00\xfd\xd4\xfb\xc0{:\xf4\xa4\x86,\xdc\xeb>\x15\x98\xc9\xe8\u008ek65I\x81\xe4ʂɌ>\x97\xa2\xec\xd9I\xceR\x01<:\x8f\x01\r\xcd!\xf4\xdc\xedA\x18\xb0\xd5/XS\t\x0f\xe8Y\r\x84\x83\xed\xb4d\xe3\x8e\xe8\t<ֶ1\xeaߣ\xee\x00d\xe3\xa6Z\x10\xf6%\xe1\xf4\xc5\xdcm\x84\x86\xa3\xd0\x1d~\x8c\x94\xb5\xe2\x05<\xf2.Й\x89\xbe(\x12J\xf8\xca<)\xb3\xb7\x1b8\x10\xb9\xb0\xb9\xb9i\x14\r\x85\xb5\xb6m\xdb\x19E/7\x91oUud}\xb8\x91xD}\x13TS\b_\x1f\x14aM\x9d\xc7\x1b\xe1T\x11\xa1\x9bX\\\xcbV\xfe\xc1\xf7\xa58\\ϰ\xaeb-}\xb1&\xbe\xe2\x01.\x8c\x1c\xe8\xa2_\x9a\xac8\x11\xcdC\xcc\xce\xcf\x7f}\xf8\x06\xc3\xd6\xd1\x19K\xf6#暈\xe1\xe4\x02&L\x99=\xfa\xe4Ľ\xb7mԉF:\xab\f\xc5\x1f\xb5Vh\x96\xf4\x87\xaej\x15\xb1\xdf\xff\xd9a \xf6U\t\xdb\xd8m@\x85\xd09>Ⲅ;\x03[Ѣފ\x80\xbf\xb9\x03\x98\xe9P0\xb1os\xc1\xb4QZ\n'\xd6&\x13C\xa7s\xc6_ӓ\xff\xe0\xb0f\xd71{\xbcL\xedU_\x01\xf8\xf8\x8a\x99l9S\x99?\xb2\xfce\xab\xc0Rh\x81\xe9Kn\xcd\x00\xccLrm_\x8eB\x92\\)\x05\xd0gK\x98Gg\x83\"\xeb_N\x85\xac\\i8\xe3\x00\xfejaj\xd4\x17,\xd9F!PF2\x938\xc6\x1d\xa7\x88\xa4 
b\xb2\xa6\xb1|.\xce\x13\x9c\xbe;\xe2U\x1c\xa8\x01\x89m2\xd9\x1a\xa3\f\x9c:<\x98vrK\xcb*k5\x8ae\xde\xe3\xd8\xfa\xcaIzk\xcd^5k\x1b\xa7\xcd\xe89\xc7_\xa0/\x13\x86\x93-\xd9\n\x8e9FR\xc4zQ\f\x01ɉw\xaf\x9a\xce\xe726\x7f{\x85Z\x86s\xbe\\\x9d\x8f\xc1\xe0\xb8\xcb\x05w\x8e(\x87\xe3ї\x97I\xcd#\x1b\xf3H\x88\x8d&Of\x10\xa6\x10,\xe1n?Ѩ\x02|\xf8\x00\xd6Çt\x19\xf9\xf01\x85k\xa74\x15jZx3\x1a\x9f\x95\xd6þ\xef\x8a\xe2\xb1\xfar\x03d;\xba@\xc0?\x16\xe2\v\x1e\x88;\xb3h;Yx\x16\x8a\xc6r\x97\xc1B\x85{N\xb1\x1e\xa9\xf3\x86O\x02z\xcf)'D\x95\xb6\xa3w\x19\x15\x8cp\xe1`\xe9\xee\xf6\x829\x0f\xa3\xe0\x90]\xeen\x87\xdc\xf2\x18\xbd0\xa6\x98^\x12\xc8\xe6\x1c\x8aC;#c1z\x1f\xdaX\x01ǫ\xdf%\xc8s\xe9\x01\xb7\xf5\xaaQ\xdcV\x98q\xe6\x94\xf2\x8e|U\xcc\x05\xa2\n\xd1>\x94й\x04\x9cS\fW\xd7\nA\xaa\xfd\x1e=\x1aJ\xf55m\xbc{\xdc^\x87\xd3&9\x9d\xfb\t\x86\xd8a\xb5\xc29\x94ܱ\xb3g{\xa2\xdeE\x11\t\xdf =F3.\xf0\xf3m\":\x90Õ\x9b\xafW\\\bz\xef&\x8d\xb0{\xdcr\a\x961c\xf7\xb8Fx\xbe\xcaAߊ\x9f\xf1\xe0\n\xe5\xca\x7f=\x9e\u05c8\xbd\x90N\x01\xdc\xf1\r;\xef\x1es\x85t\xa4\x03\xe8 \x88%\xfa\xab\x13T/Y\x9d0\x9c\x8fޝ߇\xb7~\x13\xe0\xed\xab\x88\xb7K\xc8g\xf0V/\xbf\x1a2\x17o\xe5Q\xaeQ\x17\xafx\xae\x00w\xcc\x0e\xd6o/Q\xf9\x9d\x8b|w\xb5\x90Y\xa6\xf8\xc5\xf4)Y.'\xe6\x99f1;=\x92ojC\xe3\xe5\xf6\xad\x8dhz\xb2\xea\xdd^w>\xa6\xa1\xfe!\x8boe\xdfՊ\xd6\xe9\thzۿԾ\xadW\xc4\xfb\x9e\x97\x93z'ƫl|r\x18ޙr\xfd\xdbI_Z\x1a\xd3#\xabC\txD\x03\xdcj\v\xa5Q\x0e:C\t߸\x1b\x8f\x17\x9f\xeb\xe5\x15)\xf2\xdd+\x8ae\x97{\xa6\f\xe8\xf5\xbaὉ\xaf;\x05\xabXI\x98NkQi\xdc\x00\xf9\xee\\\xff\x98=(-\x86 \x9aK\x89\xfak\x92JW\xc5~\t\x88\x8a\x9b\x8aeO{\x1dz߿\xabh\x18+/a\xb8\xb72\x020\xdf\xf1~\xf3.,\xb1\a\xbf\x00f\xc72\xb9\x98\x1f\xa1\xbd~=@ӵ\xb9\xcct\x8fϙ\xd1\xcfu\x8d.\x97-\v\xd8yt\xc2g\xa7V\x0f\xc7\xd3\xc9t\xd9\xc9%\xcea.\xabs|\x99\xcd\xcc\xfd-\x1e\x86w1\xdd\xe3\xbbD\xf6p;:X=\x1c\xe6\xf8xj\xba\xb6Bό\xc7\xe7ف\xfa!Kf\x0e\xa00r沓\x86\xb1'\x8c\xaa\xf8$s\x95J\x17\xb8\xa1K\x96*8-rEv\xb0d־\x9c\x0e\xc8\xea\xfd\xec\xbd\xfd\xca\xf8\x98\x9d/¹\x17\xe9\x9c\x17\xa6oˋ\xf9\xf1\x91\xfa\xb7\xd9\xe1\x95\v\xdd\xfcO\x83K-\xf5L\xf8R\x82\xef\xff\xafȥ\xf7i\xa6^\xe7\xe5\xf96\xbfgJ\xce\x12\xb5\x1a\x8c\xc8\xe5Dw\xff\xac2\x1d\xe9\xaa\xf1\xb1p\x03\xff\xf9\xef\xd5\xff\x02\x00\x00\xff\xffG\x0e\xcf\xec\xfa\x1b\x00\x00"), + 
[]byte("\x1f\x8b\b\x00\x00\x00\x00\x00\x00\xff\xbcYK\x93۸\x11\xbeϯ\xe8r\x0e\xbe\x8c8\xebM*\x95\xd2\xcd\xd6d\xabTYOT\x96w\xee\x10\xd9\"\xb1\x03\x02\f\x1e\x9aLR\xf9\xef\xa9\x06\b\n$\xa1\x97c\x87\x87\xa9\x11\x80nt7\xd0_?\xb0X,\xeeXǟQ\x1b\xae\xe4\x12X\xc7\xf1\x9f\x16%\xfd2\xc5\xcb_L\xc1\xd5\xc3\xe1\xc3\xdd\v\x97\xd5\x12V\xceX\xd5~A\xa3\x9c.\xf1\x11\xf7\\r˕\xbckѲ\x8aY\xb6\xbc\x03`R*\xcbh\xd8\xd0O\x80RI\xab\x95\x10\xa8\x175\xca\xe2\xc5\xedp縨P{\xe6q\xeb\xc3OŇ\x9f\x8b\x9f\xee\x00$kq\t\xc4\xcfuB\xb1\xca\x14\a\x14\xa8U\xc1՝\xe9\xb0$\xb6\xb5V\xae[\xc2q\"\x90\xf5[\x06q\x1f\x99e\xbfy\x0e~Ppc\xff6\x99\xf8\x95\x1b\xeb';\xe14\x13\xa3]\xfd\xb8\xe1\xb2v\x82\xe9t\xe6\x0e\xc0\x94\xaa\xc3%<і\x1d+\x91\xc6zM\xbc\b\v`U\xe5m\xc3\xc4FsiQ\xaf\x94pm\xb4\xc9\x02*4\xa5\xe6\x9d\xf5\xba\x1f\x05\x02c\x99u\x06\x8c+\x1b`\x06\x9e\xf0\xf5a-7Z\xd5\x1aM\x10\t\xe0w\xa3\xe4\x86\xd9f\tEX^t\r3\xd8\xcf\x06\xf3m\xfdD?d\xdfHZc5\x97un\xff\xaf\xbcE\xa8\x9c\xf6\xc7F:\x97\b\xb6\xe1&\x15\xec\x95\x19\x12N[\xacN\x8a\xe1牙\xb1\xac\xed\xa6\xf2$\xa4A\xa0\x8aỶ\xb3Rm'\xd0b\x05\xbb7\x8bQ\x89\xbd\xd2-\xb3K\xe0\xd2\xfe\xf9O\xa7-ћ\xaa\xf0\xa4\x8fJ\x8e\xcd\xf2\x89F!\x19\x0e\x92\xd0\tը\xb3\xb6Q\x96\x89\xffE\x10K\f>%\xf4A\x92\xc07\x1d\xbf(\n]7P{\xb0\r\xc2'V\xbe\xb8\x0e\xb6ViV#\xfc\xaa\xcapx\xaf\r\xea\xfe\xf0va\x89i\x94\x13\x15\xec\xa2\xc6\x00\xc6*\x9d=\xc5\x0e\xcb\"P\xf5|#\xdb\xc9Q\x8e\xf7\xfcΗ\xac\xd4Ȳ\x97,\xa2L\xe1Wp%\xf37\xedc\x8dWݲԚRU8\x98\x0eS\x89\xb8\x81N\xab\x12\x8d9s\xef\x89|$\xc3\xd3q`f\x96\xb0\xe2\xf03\x13]\xc3>\x04\x94)\x1blٲ\xa7P\x1dʏ\x9b\xf5\xf3\x1f\xb7\xa3a8\x89\x19\xac\xb4\x86\xc0\x82Dﴲ\xaaT\x02vh_\x11\xa5\xc7-h\xd5\x015\x81\\ͥ\x01&\xab\x81'\xa4\v\x8ePM\x97\xdc\xf3\xa3\xd90\xd9_'աN\x8f\x1dh\xcb\x0e\xb5\xe5\x11}×\x84\x95dt\xa2\xc4{\xd23\xac\x82\x8a\xe2\t\x06-z,Ū7M8'n@c\xa7Ѡ\xb4c\x11z\xc3\xed\x81IP\xbb߱\xb4\x05lQ\x13\x9bx\xffK%\x0f\xa8-h,U-\xf9\xbf\x06\xde\x06\xac\xf2\x9b\nf\xb1\x0f\a\xc7\xcfc\xb7d\x02\x0eL8\xbc'\xdbA\xcb\xde@#\xed\x02N&\xfc\xfc\x12S\xc0g\xa5\x11\xb8ܫ%4\xd6vf\xf9\xf0Ps\x1b\xc3i\xa9\xda\xd6In\xdf\x1e\xbc\xb9\xf9\xceY\xa5\xcdC\x85\a\x14\x0f\x86\xd7\v\xa6ˆ[,\xad\xd3\xf8\xc0:\xbe\xf0\xa2K\x1fR\x8b\xb6\xfa\x83\xee\x03\xb0y?\x92uv\xd1\xc2\xe7c\xe1\x99\x13\xa0\x90H\xb7\x9c\xf5\xa4A\x8b\xa3\xa1i\x88\xac\xf3\xe5\xafۯ\x10\xb7\xf6\x871\xb5\xbe\xb7\xfb\x91\xd0\x1c\x8f\x80\f\xc6\xe5\x1eu8ĽV\xad牲\xea\x14\x97\xd6\xff(\x05G95\xbfq\xbb\x96[:\xf7\x7f84\x96Ϊ\x80\x95\xcf1`\x87\xe0:\xf2\uea80\xb5\x84\x15kQ\xac\x98\xc1\x1f~\x00di\xb3 \xc3^w\x04iz4]\x1c\xac\x96L\xc4\f\xe7\xc4y\x1d\xdd~\xdbaI\aG\xb6#\"\xbe\xe7}\f \xdfe\xc9\xcab\xc4.\xef\xae\xf4e\xa1\x7f\xbah\"ϧ\x1cM\x14K&\x10\x1b\xa3QX9c\n 
\xa6!l\xa0\xd1\xd8)í\xd2o\xc48D\xafb\xc6\xe1\x84\xf1\xe9+\x99,Q\\\xd0d\xe5\x17\x01\x97\x15\xd9\x11\x87;G\xf0\x10\x18x\x99\x94\xac\x15\xf9\xc4)\xf3\x86om\x89\x86\xae\xa8AK\x1a\xc9L`\xe1\x12\x8e\xb9\x1d\xa49\xdcT\xab\x9dR\x02\xd9\x14\xefJ÷\x92u\xa6Q\xf6\x82n\xeb=ĕ_\xdf:\xa4\xcdW\xdb\xf5=\xfd\x89\xe3t/\x0e\xbc\xea\x01\x98\x9c\x87\xb2\x9c9\xc8B\x00ZZ\xb4ڮ\xc1\xf4\xe4s#H'\x04\xdb\t\\\x82\xd5n\xae\xd8\xe9kH_d\xbb\x12\xccd\x17L\x14ܦ\xebs\xd7/2\x84ү\xb0\r\x9bB\xcd`q\x8a?\x94\xac'D|HK\xe0\x95\xdb&Ky\xe6\xfeA\x9ft\xb1\x1a\xafV(Y\x9eէO\u0082:j\x7fF\x99\xcd\xf3\xca\xeb{I3\x82\xe5o\xd1,\xb0<}\x13g\xba=\x8f\br\xdaM\xa4<\xa5\x9c\"\a#\x90\xc0\n\\w\xbb\xec\xe4\xe1\\c5\x97y1:\xaf\xcc\xf4X\xe9\x13n;\x03w\xe8\xf3\xadϔQ\xad\x94\xdc\xf3z\xbewZ:\x9e\U000d1cea͂F\xb2%Y\x9cb\x04I\xb2\xf0\xc9\xdd\"\x06\x10J\x93\xf6\xbcv\xfa\x94\xeb\xef9\x8a\xca\xdc\xec\xed\x17\xecᅸ\x80a\x83\x121\xda\xf5P\x95\xe4\xaf\xe1B8\xe3+G\x9a\xcc(\x10bJA\x90x\xe4\xc8\r\xbc{\aJû\xd0Qxw\x1f\xe2\x8f\xe3\xc2.x\x9aDg8\xber!\xe2\xbe7\x85\xa5!\x95\xa6BF\xb9K \xfe\xf7\xc9\xf2\x89\x1d,\xd5W^w\xab\xe0\x95q;\xe4\xae9\x04\x8f\xbc\xcc=\xecpO\xf9\x92F봤ІZS\x06a\x85\x1dz7\x97\x84\x1c\xaf\x8er*\xcdkNu\x81\x1cf\x8eyK\x00\x87\x8c\xa4}U\xee\xe1\xca\xe3mA\x99BL\xe2\b\x00\x8f\xec\xc8C\xc3\xe6\x04\xe0Tw\xac\xb6\xeb\fρ\xa2\xea\xfd+\xe3\x9d\x17\xad\xb1y^]e\a\x12%\x83\xd74\xfc\xda\xf0\xb2\x19\x9f۬F\xf0\xb2\xb0\x17\xf49\xeaMb\x86\xd6\xdbi\xd0\x1c\xc9\xfa\xdbh\xf1\xc4OF\b7Թ\x91\xff\\\xa8\xf3\x89J\xc74\x13\x02\xc5/\\\xa0\t\xfb^\x11\x017s\xaa\xc1\xac\xaeݡ&\xc3\xeeir\xd8\xe0D\f웕\xe4\"\x1djJ\xdaB\xb6\xe9L,\xdcNk\x06٦\xd3|6\x03\xdb\xf9ȹȗ\x10\x935S؛L\xa7\x002\x9d\x1a\xfbbvv\U000fceaa\xcc\xf2\x9d\x9b\xeb\n\xadВ\xedϧtZ\xa3\xb4\xb1Q\xab\xf6\xdfTj\x95\xa1ř6\xb1.\x95's\n\xdf\xcb\xd0U\x02\xff,\x96L\xbe\x91\x16ۨ\xb9\x93?\xb2\v\x94\xbe\xb7Bܰ\x02<\xa0\x04\xf2\r\xc6\x05\x85R\xcf\xd2\x14S\x9a<\xbe\r\\\xfa\xb0\x12\xae_\xbc\x8c\xb1\x89\xd4\xf7h\xbe\x12Z\xf8&\xc1{s\x86\xa7\x8fj\xe4\xa9\x19#\xcc!&\xf6g+fq\x91ezU\xb2\x92\x85\xa1!y\xfb\x82ƉL\xc4\xfe\x81\xc9[\xd82Կ&\x9b\xbc\x9d\xafژ\x01\x06:0\xe9q\xfb\\\x11\xfb\xed\x19]\x8bư\xfaR`\xfd\x1cV\x85\xdeSO\x02lG\x89\xcdX\xb4\xf7\xa6w\xb6\x9b\x02\x86T\xd5%\t\x9eT巗7w\x82o\x92\xa4c\xb6\xb9 Ɇ\xd9&\x02\xcc\xde\t\xe1ifiP_\x15퐼\xe9{eC\xbe\xedpI\xcd\x1c\xd3ly\xb6\xd6M\xfa\xa7\xb7U\x06\xa3\xd7\xdcK\x12\x8f\x16_\xc8L\xfaw\xe4\\^\xb2%\x17'`\xf1\x8f\x04\xab\xe9K\xdf\xfd\xf0p\xc8l\xffRQ6L\xd6\xe4\x10\x92\x82\x9b\x0f\x8e9ƳTc\x94X\x8c\xc5\xff\x7f\xe6\x14\xd9\xeb2\x1b\xf4\x92W\t\xef\xbe=\x95\x8e\xb8\xdd\xf02\xb4\x84\x7f\xff\xe7\xee\xbf\x01\x00\x00\xff\xff\xf8I\x957\xdd!\x00\x00"), } var CRDs = crds() diff --git a/design/Implemented/restore-with-EnableAPIGroupVersions-feature.md b/design/Implemented/restore-with-EnableAPIGroupVersions-feature.md index c5f9e1c15..e5580f948 100644 --- a/design/Implemented/restore-with-EnableAPIGroupVersions-feature.md +++ b/design/Implemented/restore-with-EnableAPIGroupVersions-feature.md @@ -29,7 +29,7 @@ During restore, the proposal is that Velero will determine if the `APIGroupVersi The proposed code starts with creating three lists for each backed up resource. The three lists will be created by (1) reading the directory names in the backup tarball file and seeing which API group versions were backed up from the source cluster, (2) looking at the target cluster and determining which API group versions are supported, and - (3) getting config maps from the target cluster in order to get user-defined prioritization of versions. + (3) getting ConfigMaps from the target cluster in order to get user-defined prioritization of versions. 
The three lists will be used to create a map of chosen versions for each resource to restore. If there is a user-defined list of priority versions, the versions will be checked against the supported versions lists. The highest user-defined priority version that is/was supported by both target and source clusters will be the chosen version for that resource. If no user specified versions are supported by neither target nor source, the versions will be logged and the restore will continue with other prioritizations. diff --git a/design/merge-patch-and-strategic-in-resource-modifier.md b/design/merge-patch-and-strategic-in-resource-modifier.md index 0259ca642..a8127bc49 100644 --- a/design/merge-patch-and-strategic-in-resource-modifier.md +++ b/design/merge-patch-and-strategic-in-resource-modifier.md @@ -88,10 +88,10 @@ Example of StrategicPatches in ResourceModifierRule version: v1 resourceModifierRules: - conditions: - groupResource: pods - resourceNameRegex: "^my-pod$" - namespaces: - - ns1 + groupResource: pods + resourceNameRegex: "^my-pod$" + namespaces: + - ns1 strategicPatches: - patchData: | { diff --git a/design/node-agent-concurrency.md b/design/node-agent-concurrency.md new file mode 100644 index 000000000..597da8979 --- /dev/null +++ b/design/node-agent-concurrency.md @@ -0,0 +1,131 @@ +# Node-agent Concurrency Design + +## Glossary & Abbreviation + +**Velero Generic Data Path (VGDP)**: VGDP is the collective of modules introduced in the [Unified Repository design][1]. Velero uses these modules to finish data transfer for various purposes (i.e., PodVolume backup/restore, Volume Snapshot Data Movement). VGDP modules include uploaders and the backup repository. + +## Background + +Velero node-agent is a daemonset hosting controllers and VGDP modules to complete the concrete work of backups/restores, i.e., PodVolume backup/restore, Volume Snapshot Data Movement backup/restore. +For example, node-agent runs DataUpload controllers to watch DataUpload CRs for Volume Snapshot Data Movement backups, so there is one controller instance on each node. One controller instance takes a DataUpload CR and then launches a VGDP instance, which initializes an uploader instance and the backup repository connection, to finish the data transfer. The VGDP instance runs inside the node-agent pod or in a pod associated with the node-agent pod on the same node. + +Depending on the data size, data complexity and resource availability, VGDP may take a long time and consume significant resources (CPU, memory, network bandwidth, etc.). +Technically, VGDP instances are able to run concurrently regardless of the requesters. For example, a VGDP instance for a PodVolume backup could run in parallel with another VGDP instance for a DataUpload. The two VGDP instances then share the same resources if they are running on the same node. + +Therefore, in order to gain optimal performance with the limited resources, it is worthwhile to make the concurrent number of VGDP instances per node configurable. When node resources are sufficient, users can set a large concurrent number so as to reduce the backup/restore time; otherwise, the concurrency should be reduced, or the backup/restore may encounter problems, i.e., time lagging, hangs or OOM kills.
+ +## Goals + +- Define the behaviors of concurrent VGDP instances in node-agent +- Create a mechanism for users to specify the concurrent number of VGDP per node + +## Non-Goals +- VGDP instances on different nodes always run concurrently since in most common cases the resources are isolated. For the special cases where some resources are shared across nodes, there is no support at present +- In practice, restores run in prioritized scenarios, e.g., disaster recovery. However, the current design doesn't consider this difference: a VGDP instance for a restore is blocked if it reaches the concurrency limit, even though the ones blocking it are for backups. If users do meet problems here, they should consider stopping the backups first +- Sometimes, users want to totally block backups/restores from running on a specific node; this is out of the scope of the current design. To achieve this, more modules need to be considered (i.e., exposers of data movers); simply blocking the VGDP (e.g., by setting its concurrent number to 0) doesn't work. E.g., for a fs backup, the VGDP instance must run on the node the source pod is running on; if we simply block the VGDP instance, the PodVolumeBackup CR is still submitted but never processed. + +## Solution + +We introduce a configMap named ```node-agent-configs``` for users to specify the node-agent related configurations. This configMap is not created by Velero; users should create it manually on demand. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only. +The node-agent server checks these configurations at startup time and uses them to initiate the related VGDP modules. Therefore, users could edit this configMap at any time, but in order to make the changes effective, the node-agent server needs to be restarted. +The ```node-agent-configs``` configMap may be used for other purposes of configuring node-agent in the future; at present, there is only one kind of configuration as the data in the configMap, named ```dataPathConcurrency```. + +The data structure for ```node-agent-configs``` is as below: +```go +type Configs struct { + // DataPathConcurrency is the config for data path concurrency per node. + DataPathConcurrency *DataPathConcurrency `json:"dataPathConcurrency,omitempty"` +} + +type DataPathConcurrency struct { + // GlobalConfig specifies the concurrency number to all nodes for which per-node config is not specified + GlobalConfig int `json:"globalConfig,omitempty"` + + // PerNodeConfig specifies the concurrency number to nodes matched by rules + PerNodeConfig []RuledConfigs `json:"perNodeConfig,omitempty"` +} + +type RuledConfigs struct { + // NodeSelector specifies the label selector to match nodes + NodeSelector metav1.LabelSelector `json:"nodeSelector"` + + // Number specifies the number value associated to the matched nodes + Number int `json:"number"` +} +``` + +### Global concurrent number +We allow users to specify a concurrent number that will be applied to all nodes if the per-node number is not specified. This number is set through the ```globalConfig``` field in ```dataPathConcurrency```. +The number starts from 1, which means there is no concurrency and only one instance of VGDP is allowed. There is no upper limit. +If this number is not specified or not valid, a hard-coded default value of 1 will be used.
+ +### Per-node concurrent number +We allow users to specify different concurrent numbers per node; for example, users can set 3 concurrent instances on Node-1, 2 instances on Node-2 and 1 instance on Node-3. This is for the below considerations: +- The resources may differ among nodes. Users could then specify a smaller concurrent number for nodes with fewer resources and a larger number for the ones with more resources +- Help users to isolate critical environments. Users may run some critical workloads on specific nodes; since VGDP instances may consume significant resources, users may want to run fewer instances on the nodes with critical workloads + +The range of the Per-node concurrent number is the same as the Global concurrent number. +The Per-node concurrent number takes precedence over the Global concurrent number, so it will overwrite the Global concurrent number for that node. + +The Per-node concurrent number is implemented through the ```perNodeConfig``` field in ```dataPathConcurrency```. + +```perNodeConfig``` is a list of ```RuledConfigs```, each item of which matches one or more nodes by label selectors and specifies the concurrent number for the matched nodes. This means the nodes are identified by labels. + +For example, the ```perNodeConfig``` could have the below elements: +``` +"nodeSelector: kubernetes.io/hostname=node1; number: 3" +"nodeSelector: beta.kubernetes.io/instance-type=Standard_B4ms; number: 5" +``` +The first element means the node with host name ```node1``` gets the Per-node concurrent number of 3. +The second element means all the nodes with the label ```beta.kubernetes.io/instance-type``` of value ```Standard_B4ms``` get the Per-node concurrent number of 5. +At least one node is expected to have a label matching the specified ```RuledConfigs``` element (rule). If no node has this label, the Per-node rule has no effect. +If one node falls into more than one rule, e.g., if node1 also has the label ```beta.kubernetes.io/instance-type=Standard_B4ms```, the smallest number (3) will be used. + +### Sample +A sample of the ```node-agent-configs``` configMap is as below: +```json +{ + "dataPathConcurrency": { + "globalConfig": 2, + "perNodeConfig": [ + { + "nodeSelector": { + "matchLabels": { + "kubernetes.io/hostname": "node1" + } + }, + "number": 3 + }, + { + "nodeSelector": { + "matchLabels": { + "beta.kubernetes.io/instance-type": "Standard_B4ms" + } + }, + "number": 5 + } + ] + } +} +``` +To create the configMap, users need to save something like the above sample to a json file and then run the below command: +``` +kubectl create cm node-agent-configs -n velero --from-file= +``` + +### Global data path manager +As for the code implementation, the data path manager maintains the total number of running VGDP instances and ensures the limit is not exceeded. At present, there is one data path manager instance per controller; as a result, the concurrent numbers are calculated separately for each controller. This doesn't help to limit the concurrency among different requesters. +Therefore, we need to create one global data path manager instance server-wide and pass it to different controllers. The instance will be created at node-agent server startup. +The concurrent number is required to initiate a data path manager; the number comes from either the Per-node concurrent number or the Global concurrent number.
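+
+For illustration, below is a minimal sketch of how node-agent could resolve the effective concurrent number for the node it runs on from the structures above. The helper name ```resolveConcurrency``` and the direct use of node labels are assumptions for illustration only, not part of the design:
+```go
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+)
+
+// resolveConcurrency returns the effective VGDP concurrency for a node:
+// the smallest matching per-node rule wins; otherwise the global value is
+// used; invalid or missing values fall back to the hard-coded default of 1.
+func resolveConcurrency(cfg *DataPathConcurrency, nodeLabels map[string]string) int {
+	concurrency := 1
+	if cfg == nil {
+		return concurrency
+	}
+	if cfg.GlobalConfig >= 1 {
+		concurrency = cfg.GlobalConfig
+	}
+
+	matched := 0
+	for _, rule := range cfg.PerNodeConfig {
+		selector, err := metav1.LabelSelectorAsSelector(&rule.NodeSelector)
+		if err != nil || rule.Number < 1 {
+			continue // skip invalid rules
+		}
+		if selector.Matches(labels.Set(nodeLabels)) {
+			// if the node falls into more than one rule, the smallest number is used
+			if matched == 0 || rule.Number < matched {
+				matched = rule.Number
+			}
+		}
+	}
+	if matched > 0 {
+		concurrency = matched
+	}
+	return concurrency
+}
+```
+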
+Below are some prototypes related to data path manager: + +```go +func NewManager(concurrentNum int) *Manager +func (m *Manager) CreateFileSystemBR(jobName string, requestorType string, ctx context.Context, client client.Client, namespace string, callbacks Callbacks, log logrus.FieldLogger) (AsyncBR, error) +``` + + + + + +[1]: Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md \ No newline at end of file diff --git a/design/pv_backup_info.md b/design/pv_backup_info.md new file mode 100644 index 000000000..771f8255c --- /dev/null +++ b/design/pv_backup_info.md @@ -0,0 +1,186 @@ +# PersistentVolume backup information design + +## Abstract +Create a new metadata file in the backup repository's backup name sub-directory to store the PVC and PV information included in the backup. The information includes how the PVC and PV data is backed up, snapshot information, and status. The needed snapshot status can also be recorded there, but the Velero-Native snapshot plugin doesn't provide a way to get the snapshot size from the API, so it's possible that not all snapshot size information is available. + +This new additional metadata file is needed to: +* Get a summary of the backup's PVC and PV information, including how the data in them is backed up, or whether the data in them is skipped from backup. +* Find out how the PVC and PV should be restored in the restore process. +* Retrieve the PV's snapshot information for the backup. + +## Background +There is already a [PR](https://github.com/vmware-tanzu/velero/pull/6496) to track the skipped PVCs in the backup. This design will depend on it and go further to get a summary of PVC and PV information, then persist it into a metadata file in the backup repository. + +In the restore process, the Velero server needs to decide how the PV resource should be restored according to how the PV is backed up. The current logic is to check whether it's backed up by Velero-native snapshot, by file-system backup, or whether it has `DeletionPolicy` set to `Delete`. + +The checks are made against the backup-generated PVBs or Snapshots. There is no generic way to find this information, and the CSI backup and Snapshot data movement backup are not covered. + +Another thing worth noting is that, when describing the backup, there is no generic way to find the PV's snapshot information. + +## Goals +- Create a new metadata file to store the backup's PVC and PV information and the volume data backing-up method. The file can be used to let downstream consumers generate a summary. +- Create a generic way to let the Velero server know how the PV resources are backed up. +- Create a generic way to let the Velero server find the PV's corresponding snapshot information. + +## Non Goals +- Unify how to get snapshot size information for all PV backing-up methods, and all other PV information that is not currently available. + +## High-Level Design +Create a _backup-name_-volumes-info.json metadata file in the backup's repository. This file will be encoded to contain all the PVC and PV information included in the backup. The information covers whether the PV or PVC's data is skipped during backup, how its data is backed up, and the backed-up detail information. + +Please note that the new metadata file includes all skipped volume information. This is used to address [the second phase needs of skipped volumes information](https://github.com/vmware-tanzu/velero/issues/5834#issuecomment-1526624211).
+ +The `restoreItem` function can decode the _backup-name_-volumes-info.json file to determine how to handle the PV resource. + +## Detailed Design + +### The VolumeInfo structure +The _backup-name_-volumes-info.json file contains an array of `VolumeInfo` structures. + +``` golang +type VolumeInfo struct { + PVCName string // The PVC's name. + PVCNamespace string // The PVC's namespace. + PVName string // The PV name. + BackupMethod string // The way the volume data is backed up. The valid values include `VeleroNativeSnapshot`, `PodVolumeBackup` and `CSISnapshot`. + SnapshotDataMoved bool // Whether the volume's snapshot data is moved to specified storage. + + Skipped bool // Whether the Volume is skipped in this backup. + SkippedReason string // The reason why the volume is skipped in the backup. + StartTimestamp *metav1.Time // Snapshot start timestamp. + + OperationID string // The Async Operation's ID. + + CSISnapshotInfo CSISnapshotInfo + SnapshotDataMovementInfo SnapshotDataMovementInfo + NativeSnapshotInfo VeleroNativeSnapshotInfo + PVBInfo PodVolumeBackupInfo + PVInfo PVInfo +} + +// CSISnapshotInfo is used for displaying the CSI snapshot status +type CSISnapshotInfo struct { + SnapshotHandle string // It's the storage provider's snapshot ID for CSI. + Size int64 // The snapshot corresponding volume size. Some of the volume backup methods cannot retrieve the data by current design, for example, the Velero native snapshot. + + Driver string // The name of the CSI driver. + VSCName string // The name of the VolumeSnapshotContent. +} + +// SnapshotDataMovementInfo is used for displaying the snapshot data mover status. +type SnapshotDataMovementInfo struct { + DataMover string // The data mover used by the backup. The valid values are `velero` and `` (which equals `velero`). + UploaderType string // The type of the uploader that uploads the snapshot data. The valid values are `kopia` and `restic`. It's useful for file-system backup and snapshot data mover. + RetainedSnapshot string // The name or ID of the snapshot associated object (SAO). SAO is used to support local snapshots for the snapshot data mover, e.g. it could be a VolumeSnapshot for CSI snapshot data movement. + SnapshotHandle string // It's the filesystem repository's snapshot ID. +} + +// VeleroNativeSnapshotInfo is used for displaying the Velero native snapshot status. +type VeleroNativeSnapshotInfo struct { + SnapshotHandle string // It's the storage provider's snapshot ID for the Velero-native snapshot. + Size int64 // The snapshot corresponding volume size. Some of the volume backup methods cannot retrieve the data by current design, for example, the Velero native snapshot. + + VolumeType string // The cloud provider snapshot volume type. + VolumeAZ string // The cloud provider snapshot volume's availability zones. + IOPS string // The cloud provider snapshot volume's IOPS. +} + +// PodVolumeBackupInfo is used for displaying the PodVolumeBackup snapshot status. +type PodVolumeBackupInfo struct { + SnapshotHandle string // It's the file-system uploader's snapshot ID for PodVolumeBackup. + Size int64 // The snapshot corresponding volume size. Some of the volume backup methods cannot retrieve the data by current design, for example, the Velero native snapshot. + + UploaderType string // The type of the uploader that uploads the data. The valid values are `kopia` and `restic`. It's useful for file-system backup and snapshot data mover.
VolumeName string // The PVC's corresponding volume name used by the Pod: https://github.com/kubernetes/kubernetes/blob/e4b74dd12fa8cb63c174091d5536a10b8ec19d34/pkg/apis/core/types.go#L48 + PodName string // The Pod name mounting this PVC. The format should be namespace/pod-name. + NodeName string // The name of the k8s node on which the PVB is taken. +} + +// PVInfo is used to store some PV information modified after creation. +// That information is lost after PV recreation. +type PVInfo struct { + ReclaimPolicy string // ReclaimPolicy of PV. It could be different from the referenced StorageClass. + Labels map[string]string // The PV's labels should be kept after recreation. +} +``` + +### How the VolumeInfo array is generated. +The function `persistBackup` has `backup *pkgbackup.Request` in its parameters. +From it, the `VolumeSnapshots`, `PodVolumeBackups`, `CSISnapshots`, `itemOperationsList`, and `SkippedPVTracker` can be read. All of them will be iterated and merged into the `VolumeInfo` array, and then persisted into the backup repository in the function `persistBackup`. + +Please note that changes that happen in async operations are not reflected in the new metadata file. The file only covers the volume changes that happen in the Velero server process scope. + +A new method is added to BackupStore to download the VolumeInfo metadata file. +Uploading the metadata file is covered in the existing `PutBackup` method. + +``` golang +type BackupStore interface { + ... + GetVolumeInfos(name string) ([]*VolumeInfo, error) + ... +} +``` + +### How the VolumeInfo array is used. + +#### Generate the PVC backed-up information summary +The downstream tools can use this VolumeInfo array to format and display their volume information. This is in the scope of this feature. + +#### Retrieve volume backed-up information for `velero backup describe` command +The `velero backup describe` command can also use this VolumeInfo array structure to display the volume information. The snapshot data mover volume should use this structure at first, then the Velero native snapshot, CSI snapshot, and PodVolumeBackup can also use this structure. The detailed implementation is also not in this feature's scope. + +#### Let restore know how to restore the PV +The function `restoreItem` determines whether to restore the PV resource by checking it against the Velero native snapshots list, the PodVolumeBackup list, and its DeletionPolicy. This logic is still kept. To support backward compatibility, it will be used when the new `VolumeInfo` metadata cannot be found. + +``` golang + if groupResource == kuberesource.PersistentVolumes { + switch { + case hasSnapshot(name, ctx.volumeSnapshots): + ... + case hasPodVolumeBackup(obj, ctx): + ... + case hasDeleteReclaimPolicy(obj.Object): + ... + default: + ... +``` + +After introducing the VolumeInfo array, the following logic will be added. +``` golang + if groupResource == kuberesource.PersistentVolumes { + volumeInfo := GetVolumeInfo(pvName) + switch volumeInfo.BackupMethod { + case VeleroNativeSnapshot: + ... + case PodVolumeBackup: + ... + case CSISnapshot: + ... + default: + // Need to check whether the volume is backed up by the SnapshotDataMover. + if volumeInfo.SnapshotDataMoved { ... } + + // Check whether the Velero server should restore the PV depending on the DeletionPolicy setting. + if volumeInfo.Skipped { ... } +``` + +### How the VolumeInfo metadata file is deleted +The _backup-name_-volumes-info.json file is deleted during backup deletion.
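+
+For illustration only, a hypothetical entry of the _backup-name_-volumes-info.json file is sketched below, assuming plain JSON serialization of the `VolumeInfo` structure above; the field names, the CSI-based scenario and all values are made-up examples, not part of the design:
+``` json
+[
+  {
+    "PVCName": "app-data",
+    "PVCNamespace": "demo",
+    "PVName": "pvc-8f2a61b0",
+    "BackupMethod": "CSISnapshot",
+    "SnapshotDataMoved": false,
+    "Skipped": false,
+    "CSISnapshotInfo": {
+      "SnapshotHandle": "snap-0123456789abcdef0",
+      "Size": 10737418240,
+      "Driver": "ebs.csi.aws.com",
+      "VSCName": "velero-app-data-abc12"
+    },
+    "PVInfo": {
+      "ReclaimPolicy": "Delete",
+      "Labels": {
+        "app": "demo"
+      }
+    }
+  }
+]
+```
+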
+ +## Alternatives Considered +The restore process needs more information about how the PVs are backed up to determine whether the PV should be restored. The released branches also need a similar function, but backporting a new feature into previous releases may not be a good idea, so according to [Anshul Ahuja's suggestion](https://github.com/vmware-tanzu/velero/issues/6595#issuecomment-1731081580), more cases are added here to support checking PVs backed up by the CSI plugin and the CSI snapshot data mover: https://github.com/vmware-tanzu/velero/blob/5ff5073cc3f364bafcfbd26755e2a92af68ba180/pkg/restore/restore.go#L1206-L1324. + +## Security Considerations +There should be no security impact introduced by this design. + +## Compatibility +After this design is implemented, there should be no impact on the existing [skipped PVC summary feature](https://github.com/vmware-tanzu/velero/pull/6496). + +To support older-version backups, which don't have the VolumeInfo metadata file, the old logic, which checks the Velero native snapshots list, the PodVolumeBackup list, and the PVC DeletionPolicy, is still kept, and logic supporting CSI snapshots and the snapshot data mover will be added too. + +## Implementation +This will be implemented in the Velero v1.13 development cycle. + +## Open Issues +There are no open issues identified so far. diff --git a/design/velero-uploader-configuration.md b/design/velero-uploader-configuration.md new file mode 100644 index 000000000..b565ba7ef --- /dev/null +++ b/design/velero-uploader-configuration.md @@ -0,0 +1,126 @@ +# Velero Uploader Configuration Integration and Extensibility + +## Abstract +This design proposal aims to make the Velero Uploader configurable by introducing a structured approach for managing Uploader settings. We will define and standardize a data structure to facilitate future additions to Uploader configurations. This enhancement provides a template for extending Uploader-related options and also includes examples of adding sub-options to the Uploader Configuration. + +## Background +Velero is widely used for backing up and restoring Kubernetes clusters. In various scenarios, optimizing the backup process is essential; future needs may arise for adding more configuration options related to the Uploader component, especially when dealing with large datasets. Therefore, a standardized configuration template is required. + +## Goals +1. **Extensible Uploader Configuration**: Provide an extensible approach to manage Uploader configurations, making it easy to add and modify configuration options related to the Velero uploader. +2. **User-friendliness**: Ensure that the new Uploader configuration options are easy to understand and use for Velero users without introducing excessive complexity. + +## Non Goals +1. Expanding to other Velero components: The primary focus of this design is Uploader configuration and does not include extending to other components or modules within Velero. Configuration changes for other components may require separate design and implementation. + +## High-Level Design +To achieve extensibility in Velero Uploader configurations, the following key components and changes are proposed: + +### UploaderConfig Structure +A new data structure, `UploaderConfig`, will be defined to store Uploader configurations.
This structure will include the configuration options related to backup and restore for the Uploader: + +```go +type UploaderConfig struct { + // sub-options +} +``` + +### Integration with Backup CRD +The Velero CLI will support an uploader configuration-related flag, allowing users to set the value when creating backups or restores. This value will be stored in the `UploaderConfig` field within the `Backup` CRD and `Restore` CRD: + +```go +type BackupSpec struct { + UploaderConfig shared.UploaderConfig `json:"uploaderConfig,omitempty"` +} + +type RestoreSpec struct { + UploaderConfig shared.UploaderConfig `json:"uploaderConfig,omitempty"` +} +``` + +### Configuration Propagated to Different CRDs +The configuration specified in `UploaderConfig` needs to be effective for backup and restore done both the file-system way and the data-mover way. +Therefore, the `UploaderConfig` field value from the `Backup` CRD should be propagated to the `PodVolumeBackup` and `DataUpload` CRDs: + +```go +type PodVolumeBackupSpec struct { + ... + UploaderConfig shared.UploaderConfig `json:"uploaderConfig,omitempty"` +} + +type DataUploadSpec struct { + ... + UploaderConfig shared.UploaderConfig `json:"uploaderConfig,omitempty"` +} +``` + +Also, the `UploaderConfig` field value from the `Restore` CRD should be propagated to the `PodVolumeRestore` and `DataDownload` CRDs: + +```go +type PodVolumeRestoreSpec struct { + ... + UploaderConfig shared.UploaderConfig `json:"uploaderConfig,omitempty"` +} + +type DataDownloadSpec struct { + ... + UploaderConfig shared.UploaderConfig `json:"uploaderConfig,omitempty"` +} +``` + +## Sub-options in UploaderConfig +Adding the fields above in the CRDs can accommodate any future additions to Uploader configurations by adding new fields to the `UploaderConfig` structure. + +### Parallel Files Upload +This section focuses on enabling the configuration for the number of parallel file uploads during backups. +Below are the key steps that should be added to support this new feature. + +#### Velero CLI +The Velero CLI will support a `--parallel-files-upload` flag, allowing users to set the `ParallelFilesUpload` value when creating backups. + +#### UploaderConfig +Below, the sub-option `ParallelFilesUpload` is added into UploaderConfig: + +```go +type UploaderConfig struct { + ParallelFilesUpload int `json:"parallelFilesUpload,omitempty"` +} +``` + +#### Kopia Parallel Upload Policy +Velero Uploader can set upload policies when calling Kopia APIs. In the Kopia codebase, the structure for upload policies is defined as follows: + +```go +// UploadPolicy describes the policy to apply when uploading snapshots. +type UploadPolicy struct { + ... + MaxParallelFileReads *OptionalInt `json:"maxParallelFileReads,omitempty"` +} +``` + +Velero can set the `MaxParallelFileReads` parameter for Kopia's upload policy as follows: + +```go +curPolicy := getDefaultPolicy() +if uploaderCfg.ParallelFilesUpload > 0 { + curPolicy.UploadPolicy.MaxParallelFileReads = newOptionalInt(uploaderCfg.ParallelFilesUpload) +} +``` + +#### Restic Parallel Upload Policy +As Restic does not support parallel file upload, the configuration would not take effect, so we should output a warning when the user sets the `ParallelFilesUpload` value while using Restic to do a backup. + +```go +if uploaderCfg.ParallelFilesUpload > 0 { + log.Warnf("ParallelFilesUpload is set to %d, but Restic does not support parallel file uploads. Ignoring", uploaderCfg.ParallelFilesUpload) +} +``` + +Roughly, the process is as follows: +1.
Users pass the ParallelFilesUpload parameter and its value through the Velero CLI. This parameter and its value are stored as a sub-option within UploaderConfig and then placed into the Backup CR. +2. When users perform file system backups, UploaderConfig is passed to the PodVolumeBackup CR. When users use the Data-mover for backups, it is passed to the DataUpload CR. +3. Each respective controller within the CRs calls the uploader, and the ParallelFilesUpload from UploaderConfig in the CRs is passed to the uploader. +4. When the uploader subsequently calls the Kopia API, it can use the ParallelFilesUpload to set the MaxParallelFileReads parameter, and if the uploader calls the Restic command, it would output a warning log because Restic does not support this feature. + +## Alternatives Considered +To enhance extensibility further, the option of storing `UploaderConfig` in a Kubernetes ConfigMap can be explored; this approach would allow the addition and modification of configuration options without the need to modify the CRD. \ No newline at end of file diff --git a/design/volume-snapshot-data-movement/volume-snapshot-data-movement.md b/design/volume-snapshot-data-movement/volume-snapshot-data-movement.md index 55796b81f..d04a6d5b3 100644 --- a/design/volume-snapshot-data-movement/volume-snapshot-data-movement.md +++ b/design/volume-snapshot-data-movement/volume-snapshot-data-movement.md @@ -626,10 +626,9 @@ Therefore, we have below principles: We will address the two principles step by step. As the first step, VBDM’s parallelism is designed as below: - We don’t create the load balancing mechanism for the first step, we don’t detect the accessibility of the volume/volume snapshot explicitly. Instead, we create the backupPod/restorePod under the help of Kubernetes, Kubernetes schedules the backupPod/restorePod to the appropriate node, then the data movement controller on that node will handle the DataUpload/DataDownload CR there, so the resource will be consumed from that node. -We don’t expose the configurable concurrency value in one node, instead, the concurrency value in value will be set to 1, that is, there is no concurrency in one node. +We expose a configurable concurrency value per node; for details of how the concurrency number constrains the various backups and restores which share VGDP, check the [node-agent concurrency design][3]. As for the resource consumption, it is related to the data scale of the data movement activity and it is charged to node-agent pods, so users should configure enough resource to node-agent pods. -Meanwhile, Pod Volume Backup/Restore are also running in node-agent pods, we don’t restrict the concurrency of these two types. For example, in one node, one Pod Volume Backup and one DataUpload could run at the same time, in this case, the resource will be shared by the two activities. ## Progress Report When a DUCR/DDCR is in InProgress phase, users could check the progress. @@ -666,6 +665,9 @@ At present, VBDM doesn't support recovery, so it will follow the second rule. ## Kopia For Block Device To work with block devices, VGDP will be updated. Today, when Kopia attempts to create a snapshot of the block device, it will error because kopia does not support this file type. Kopia does have a nice set of interfaces that are able to be extended though. +**Notice** +The Kopia block mode uploader only supports non-Windows platforms, because the block mode code invokes some system calls that are not present in the Windows platform.
+ To achieve the necessary information to determine the type of volume that is being used, we will need to pass in the volume mode in provider interface. ```go @@ -967,5 +969,6 @@ Restore command is kept as is. -[1]: ../unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md -[2]: ../general-progress-monitoring.md \ No newline at end of file +[1]: ../Implemented/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md +[2]: ../Implemented/general-progress-monitoring.md +[3]: ../node-agent-concurrency.md \ No newline at end of file diff --git a/go.mod b/go.mod index ae0bcf348..f62f2cfb2 100644 --- a/go.mod +++ b/go.mod @@ -5,10 +5,10 @@ go 1.21 toolchain go1.21.3 require ( - cloud.google.com/go/storage v1.32.0 + cloud.google.com/go/storage v1.33.0 github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-sdk-for-go v67.2.0+incompatible - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.3.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 @@ -16,7 +16,11 @@ require ( github.com/Azure/go-autorest/autorest v0.11.27 github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 github.com/Azure/go-autorest/autorest/to v0.3.0 - github.com/aws/aws-sdk-go v1.44.256 + github.com/aws/aws-sdk-go-v2 v1.21.0 + github.com/aws/aws-sdk-go-v2/config v1.18.42 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.87 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.123.0 + github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0 github.com/bombsimon/logrusr/v3 v3.0.0 github.com/evanphx/json-patch v5.6.0+incompatible github.com/fatih/color v1.15.0 @@ -32,7 +36,7 @@ require ( github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.20.1 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.16.0 + github.com/prometheus/client_golang v1.17.0 github.com/robfig/cron v1.1.0 github.com/sirupsen/logrus v1.9.3 github.com/spf13/afero v1.6.0 @@ -40,14 +44,14 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 github.com/vmware-tanzu/crash-diagnostics v0.3.7 - go.uber.org/zap v1.25.0 + go.uber.org/zap v1.26.0 golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 - golang.org/x/mod v0.12.0 + golang.org/x/mod v0.13.0 golang.org/x/net v0.17.0 - golang.org/x/oauth2 v0.11.0 + golang.org/x/oauth2 v0.13.0 golang.org/x/text v0.13.0 - google.golang.org/api v0.138.0 - google.golang.org/grpc v1.57.0 + google.golang.org/api v0.146.0 + google.golang.org/grpc v1.58.3 google.golang.org/protobuf v1.31.0 gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.25.6 @@ -60,11 +64,12 @@ require ( k8s.io/metrics v0.25.6 k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed sigs.k8s.io/controller-runtime v0.12.2 + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd sigs.k8s.io/yaml v1.3.0 ) require ( - cloud.google.com/go v0.110.6 // indirect + cloud.google.com/go v0.110.7 // indirect cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.1 // indirect @@ -77,6 +82,21 @@ require ( github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.40 // indirect + 
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.43 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.14.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.22.0 // indirect + github.com/aws/smithy-go v1.14.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect @@ -99,8 +119,8 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/gnostic v0.6.9 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/s2a-go v0.1.5 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect @@ -109,7 +129,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/compress v1.17.0 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/klauspost/pgzip v1.2.6 // indirect github.com/klauspost/reedsolomon v1.11.8 // indirect @@ -135,38 +155,37 @@ require ( github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/procfs v0.11.1 // indirect github.com/rs/xid v1.5.0 // indirect github.com/stretchr/objx v0.5.0 // indirect github.com/vladimirvivien/gexe v0.1.1 // indirect github.com/zeebo/blake3 v0.2.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.17.0 // indirect - go.opentelemetry.io/otel/metric v1.17.0 // indirect - go.opentelemetry.io/otel/trace v1.17.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect go.starlark.net v0.0.0-20201006213952-227f4aabceb5 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.14.0 // indirect - golang.org/x/sync v0.3.0 // indirect + golang.org/x/sync v0.4.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/term v0.13.0 // indirect golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // 
indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 // indirect + google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect k8s.io/component-base v0.24.2 // indirect k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) -replace github.com/kopia/kopia => github.com/project-velero/kopia v0.0.0-20230918080509-48b07dfffc74 +replace github.com/kopia/kopia => github.com/project-velero/kopia v0.0.0-20231023031817-cf7bbc7f8519 diff --git a/go.sum b/go.sum index 32c7252f6..909063632 100644 --- a/go.sum +++ b/go.sum @@ -19,8 +19,8 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q= -cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= +cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -45,15 +45,15 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.32.0 h1:5w6DxEGOnktmJHarxAOUywxVW9lbNWIzlzzUltG/3+o= -cloud.google.com/go/storage v1.32.0/go.mod h1:Hhh/dogNRGca7IWv1RC2YqEn0c0G77ctA/OxflYkiD8= +cloud.google.com/go/storage v1.33.0 h1:PVrDOkIC8qQVa1P3SXGpQvfuJhN2LHOoyZvWs8D2X5M= +cloud.google.com/go/storage v1.33.0/go.mod h1:Hhh/dogNRGca7IWv1RC2YqEn0c0G77ctA/OxflYkiD8= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= github.com/Azure/azure-sdk-for-go v67.2.0+incompatible h1:Uu/Ww6ernvPTrpq31kITVTIm/I5jlJ1wjtEH/bmSB2k= github.com/Azure/azure-sdk-for-go v67.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 
h1:/iHxaJhsFr0+xVFfbMr5vxz848jyiWuIEDhYq3y5odY= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 h1:LNHhpdK7hzUcx/k1LIcuh5k7k1LGIWLQfCjaneSj7Fc= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1/go.mod h1:uE9zaUfEQT/nbQjVi2IblCG9iaLtZsuYZ8ne+PuQ02M= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= @@ -140,12 +140,48 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.44.256 h1:O8VH+bJqgLDguqkH/xQBFz5o/YheeZqgcOYIgsTVWY4= -github.com/aws/aws-sdk-go v1.44.256/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go-v2 v1.21.0 h1:gMT0IW+03wtYJhRqTVYn0wLzwdnK9sRMcxmtfGzRdJc= +github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 h1:OPLEkmhXf6xFPiz0bLeDArZIDx1NNS4oJyG4nv3Gct0= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13/go.mod h1:gpAbvyDGQFozTEmlTFO8XcQKHzubdq0LzRyJpG6MiXM= +github.com/aws/aws-sdk-go-v2/config v1.18.42 h1:28jHROB27xZwU0CB88giDSjz7M1Sba3olb5JBGwina8= +github.com/aws/aws-sdk-go-v2/config v1.18.42/go.mod h1:4AZM3nMMxwlG+eZlxvBKqwVbkDLlnN2a4UGTL6HjaZI= +github.com/aws/aws-sdk-go-v2/credentials v1.13.40 h1:s8yOkDh+5b1jUDhMBtngF6zKWLDs84chUk2Vk0c38Og= +github.com/aws/aws-sdk-go-v2/credentials v1.13.40/go.mod h1:VtEHVAAqDWASwdOqj/1huyT6uHbs5s8FUHfDQdky/Rs= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 h1:uDZJF1hu0EVT/4bogChk8DyjSF6fof6uL/0Y26Ma7Fg= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11/go.mod h1:TEPP4tENqBGO99KwVpV9MlOX4NSrSLP8u3KRy2CDwA8= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.87 h1:e20ZrsgDPUXqg8+rZVuPwNSp6yniUN2Yr2tzFZ+Yvl0= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.87/go.mod h1:0i0TAT6W+5i48QTlDU2KmY6U2hBZeY/LCP0wktya2oc= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 h1:22dGT7PneFMx4+b3pz7lMTRyN8ZKH7M2cW4GP9yUS2g= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 h1:SijA0mgjV8E+8G45ltVHs0fvKpTj8xmZJ3VwhGKtUSI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.43 h1:g+qlObJH4Kn4n21g69DjspU0hKTjWtq7naZ9OLCv0ew= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.43/go.mod h1:rzfdUlfA+jdgLDmPKjd3Chq9V7LVLYo1Nz++Wb91aRo= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4 h1:6lJvvkQ9HmbHZ4h/IEwclwv2mrTW8Uq1SOB/kXy0mfw= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4/go.mod h1:1PrKYwxTM+zjpw9Y41KFtoJCQrJ34Z47Y4VgVbfndjo= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.123.0 h1:AkjjaINgZQlz3valIVmlrs18jsl+fzYuoxED8oOVrYo= +github.com/aws/aws-sdk-go-v2/service/ec2 
v1.123.0/go.mod h1:0FhI2Rzcv5BNM3dNnbcCx2qa2naFZoAidJi11cQgzL0= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14 h1:m0QTSI6pZYJTk5WSKx3fm5cNW/DCicVzULBgU/6IyD0= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14/go.mod h1:dDilntgHy9WnHXsh7dDtUPgHKEfTJIBUTHM8OWm0f/0= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36 h1:eev2yZX7esGRjqRbnVk1UxMLw4CyVZDpZXRCcy75oQk= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36/go.mod h1:lGnOkH9NJATw0XEPcAknFBj3zzNTEGRHtSw+CwC1YTg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 h1:CdzPW9kKitgIiLV1+MHobfR5Xg25iYnyzWZhyQuSlDI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35/go.mod h1:QGF2Rs33W5MaN9gYdEQOBBFPLwTZkEhRwI33f7KIG0o= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4 h1:v0jkRigbSD6uOdwcaUQmgEwG1BkPfAPDqaeNt/29ghg= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4/go.mod h1:LhTyt8J04LL+9cIt7pYJ5lbS/U98ZmXovLOR/4LUsk8= +github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0 h1:wl5dxN1NONhTDQD9uaEvNsDRX29cBmGED/nl0jkWlt4= +github.com/aws/aws-sdk-go-v2/service/s3 v1.40.0/go.mod h1:rDGMZA7f4pbmTtPOk5v5UM2lmX6UAbRnMDJeDvnH7AM= +github.com/aws/aws-sdk-go-v2/service/sso v1.14.1 h1:YkNzx1RLS0F5qdf9v1Q8Cuv9NXCL2TkosOxhzlUPV64= +github.com/aws/aws-sdk-go-v2/service/sso v1.14.1/go.mod h1:fIAwKQKBFu90pBxx07BFOMJLpRUGu8VOzLJakeY+0K4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.1 h1:8lKOidPkmSmfUtiTgtdXWgaKItCZ/g75/jEk6Ql6GsA= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.1/go.mod h1:yygr8ACQRY2PrEcy3xsUI357stq2AxnFM6DIsR9lij4= +github.com/aws/aws-sdk-go-v2/service/sts v1.22.0 h1:s4bioTgjSFRwOoyEFzAVCmFmoowBgjTR8gkrF/sQ4wk= +github.com/aws/aws-sdk-go-v2/service/sts v1.22.0/go.mod h1:VC7JDqsqiwXukYEDjoHh9U0fOJtNWh04FPQz4ct4GGU= +github.com/aws/smithy-go v1.14.2 h1:MJU9hqBGbvWZdApzpvoF2WAIJDbtjK2NDJSiJP7HblQ= +github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -175,11 +211,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= @@ -233,7 +265,6 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -375,6 +406,7 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -399,8 +431,8 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.5 h1:8IYp3w9nysqv3JH+NJgXJzGbDHzLOTj43BmSkp+O7qg= -github.com/google/s2a-go v0.1.5/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -408,8 +440,8 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.5 
h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= -github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= @@ -499,8 +531,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= @@ -511,8 +543,8 @@ github.com/klauspost/reedsolomon v1.11.8 h1:s8RpUW5TK4hjr+djiOpbZJB4ksx+TdYbRH7v github.com/klauspost/reedsolomon v1.11.8/go.mod h1:4bXRN+cVzMdml6ti7qLouuYi32KHJ5MGv0Qd8a47h6A= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kopia/htmluibuild v0.0.1-0.20230917154246-98806054261e h1:XogFUFI4mcT5qyywKiGY5WqLi7l4b/eMi7BlEzgLTd0= -github.com/kopia/htmluibuild v0.0.1-0.20230917154246-98806054261e/go.mod h1:cSImbrlwvv2phvj5RfScL2v08ghX6xli0PcK6f+t8S0= +github.com/kopia/htmluibuild v0.0.1-0.20231019063300-75c2a788c7d0 h1:TvupyyfbUZzsO4DQJpQhKZnUa61xERcJ+ejCbHWG2NY= +github.com/kopia/htmluibuild v0.0.1-0.20231019063300-75c2a788c7d0/go.mod h1:cSImbrlwvv2phvj5RfScL2v08ghX6xli0PcK6f+t8S0= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -639,22 +671,22 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= -github.com/project-velero/kopia v0.0.0-20230918080509-48b07dfffc74 h1:dLMdxdRegzp3DhTa7nc9c37n8cukHLIK8SXRTOh/3pg= -github.com/project-velero/kopia v0.0.0-20230918080509-48b07dfffc74/go.mod h1:eFPSn8Dy0XlRUbIDZjEaDKmFdJMh5lySvtF6eyl5j/s= +github.com/project-velero/kopia v0.0.0-20231023031817-cf7bbc7f8519 
h1:DiikAMR1wBIY6oFoN76WEJz4f+6OM99ZiGzZ9m9v32I= +github.com/project-velero/kopia v0.0.0-20231023031817-cf7bbc7f8519/go.mod h1:V/zpEMjxzqEf3lF52m0b0nAIVQGolPYIOXRjVxeK1j0= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -669,8 +701,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/robfig/cron v1.1.0 h1:jk4/Hud3TTdcrJgUOBgsqrZBarcxl6ADIjSC2iniwLY= github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= @@ -763,7 +795,6 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= @@ -800,19 +831,19 @@ go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUz go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel v1.17.0 h1:MW+phZ6WZ5/uk2nd93ANk/6yJ+dVrvNWUjGhnnFU5jM= -go.opentelemetry.io/otel v1.17.0/go.mod h1:I2vmBGtFaODIVMBSTPVDlJSzBDNf93k60E6Ft0nyjo0= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/metric v1.17.0 h1:iG6LGVz5Gh+IuO0jmgvpTB6YVrCGngi8QGm+pMd8Pdc= -go.opentelemetry.io/otel/metric v1.17.0/go.mod h1:h4skoxdZI17AxwITdmdZjjYJQH5nzijUUjm+wtPph5o= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.17.0 h1:/SWhSRHmDPOImIAetP1QAeMnZYiQXrTy4fMMYOdSKWQ= -go.opentelemetry.io/otel/trace v1.17.0/go.mod h1:I/4vKTgFclIsXRVucpH25X0mpFSczM7aHeaz0ZBLWjY= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.starlark.net v0.0.0-20201006213952-227f4aabceb5 h1:ApvY/1gw+Yiqb/FKeks3KnVPWpkR3xzij82XPKLjJVw= @@ -830,8 +861,8 @@ go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN8 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= -go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +go.uber.org/zap v1.26.0 
h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -849,7 +880,6 @@ golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -890,9 +920,8 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= +golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -948,8 +977,6 @@ golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -967,8 +994,8 @@ golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= -golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -980,9 +1007,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1059,11 +1085,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= @@ -1072,7 +1095,6 @@ golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXR golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1084,8 +1106,6 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1160,7 +1180,6 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1191,8 +1210,8 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.138.0 h1:K/tVp05MxNVbHShRw9m7e9VJGdagNeTdMzqPH7AUqr0= -google.golang.org/api v0.138.0/go.mod h1:4xyob8CxC+0GChNBvEUAk8VBKNvYOTWM9T3v3UfRxuY= +google.golang.org/api v0.146.0 h1:9aBYT4vQXt9dhCuLNfwfd3zpwu8atg0yPkjBymwSrOM= +google.golang.org/api v0.146.0/go.mod h1:OARJqIfoYjXJj4C1AiBSXYZt03qsoz8FQYU6fBEfrHM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1248,12 +1267,12 @@ google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaE google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= -google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod 
h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= -google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44= -google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577 h1:wukfNtZmZUurLN/atp2hiIeTKn7QJWIQdHzqmsOnAOk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb h1:XFBgcDwm7irdHTbz4Zk2h7Mh+eis4nfJEFQFYzJzuIA= +google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb h1:lK0oleSc7IQsUxO3U5TjL9DWlsxpEBemh+zpB7IqhWI= +google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1278,9 +1297,8 @@ google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= -google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1410,8 +1428,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lR sigs.k8s.io/controller-runtime v0.12.2 h1:nqV02cvhbAj7tbt21bpPpTByrXGn2INHRsi39lXy9sE= sigs.k8s.io/controller-runtime v0.12.2/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.8.11/go.mod 
h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g= sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI= sigs.k8s.io/kustomize/kyaml v0.11.0/go.mod h1:GNMwjim4Ypgp/MueD3zXHLRJEjz7RvtPae0AwlvEMFM= diff --git a/hack/verify-generated-crd-code.sh b/hack/verify-generated-crd-code.sh index 1ae3cef94..1d9f23cab 100755 --- a/hack/verify-generated-crd-code.sh +++ b/hack/verify-generated-crd-code.sh @@ -19,7 +19,7 @@ HACK_DIR=$(dirname "${BASH_SOURCE}") ${HACK_DIR}/update-3generated-crd-code.sh # ensure no changes to generated CRDs -if [! git diff --exit-code config/crd/v1/crds/crds.go config/crd/v2alpha1/crds/crds.go >/dev/null]; then +if ! git diff --exit-code config/crd/v1/crds/crds.go config/crd/v2alpha1/crds/crds.go &> /dev/null; then # revert changes to state before running CRD generation to stay consistent # with code-generator `--verify-only` option which discards generated changes git checkout config/crd diff --git a/internal/credentials/file_store.go b/internal/credentials/file_store.go index 1332d4f8d..4b5d25664 100644 --- a/internal/credentials/file_store.go +++ b/internal/credentials/file_store.go @@ -71,7 +71,7 @@ func (n *namespacedFileStore) Path(selector *corev1api.SecretKeySelector) (strin keyFilePath := filepath.Join(n.fsRoot, fmt.Sprintf("%s-%s", selector.Name, selector.Key)) - file, err := n.fs.OpenFile(keyFilePath, os.O_RDWR|os.O_CREATE, 0644) + file, err := n.fs.OpenFile(keyFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) if err != nil { return "", errors.Wrap(err, "unable to open credentials file for writing") } diff --git a/internal/delete/delete_item_action_handler.go b/internal/delete/delete_item_action_handler.go index d85df8a6a..ba242c0ca 100644 --- a/internal/delete/delete_item_action_handler.go +++ b/internal/delete/delete_item_action_handler.go @@ -119,6 +119,7 @@ func InvokeDeleteActions(ctx *Context) error { if !action.Selector.Matches(labels.Set(obj.GetLabels())) { continue } + err = action.DeleteItemAction.Execute(&velero.DeleteItemActionExecuteInput{ Item: obj, Backup: ctx.Backup, diff --git a/internal/hook/wait_exec_hook_handler.go b/internal/hook/wait_exec_hook_handler.go index aa6b9b188..04ad967a7 100644 --- a/internal/hook/wait_exec_hook_handler.go +++ b/internal/hook/wait_exec_hook_handler.go @@ -50,6 +50,11 @@ type DefaultListWatchFactory struct { PodsGetter cache.Getter } +type HookErrInfo struct { + Namespace string + Err error +} + func (d *DefaultListWatchFactory) NewListWatch(namespace string, selector fields.Selector) cache.ListerWatcher { return cache.NewListWatchFromClient(d.PodsGetter, "pods", namespace, selector) } @@ -158,8 +163,8 @@ func (e *DefaultWaitExecHookHandler) HandleHooks( if hook.Hook.WaitTimeout.Duration != 0 && time.Since(waitStart) > hook.Hook.WaitTimeout.Duration { err := fmt.Errorf("hook %s in container %s expired before executing", hook.HookName, hook.Hook.Container) hookLog.Error(err) + errors = append(errors, err) if hook.Hook.OnError == velerov1api.HookErrorModeFail { - errors = append(errors, err) cancel() return } @@ -172,8 +177,9 @@ func (e *DefaultWaitExecHookHandler) HandleHooks( } if err := e.PodCommandExecutor.ExecutePodCommand(hookLog, podMap, pod.Namespace, pod.Name, hook.HookName, eh); err != nil { hookLog.WithError(err).Error("Error executing hook") + err = fmt.Errorf("hook %s in container %s failed to execute, err: %v", hook.HookName, hook.Hook.Container, err) + errors = append(errors, err) if hook.Hook.OnError == velerov1api.HookErrorModeFail { - errors = append(errors, err) 
cancel() return } @@ -204,10 +210,9 @@ func (e *DefaultWaitExecHookHandler) HandleHooks( podWatcher.Run(ctx.Done()) // There are some cases where this function could return with unexecuted hooks: the pod may - // be deleted, a hook with OnError mode Fail could fail, or it may timeout waiting for + // be deleted, a hook could fail, or it may timeout waiting for // containers to become ready. - // Each unexecuted hook is logged as an error but only hooks with OnError mode Fail return - // an error from this function. + // Each unexecuted hook is logged as an error and this error will be returned from this function. for _, hooks := range byContainer { for _, hook := range hooks { if hook.executed { @@ -222,9 +227,7 @@ func (e *DefaultWaitExecHookHandler) HandleHooks( }, ) hookLog.Error(err) - if hook.Hook.OnError == velerov1api.HookErrorModeFail { - errors = append(errors, err) - } + errors = append(errors, err) } } diff --git a/internal/hook/wait_exec_hook_handler_test.go b/internal/hook/wait_exec_hook_handler_test.go index 2702e9c45..3e809ccfa 100644 --- a/internal/hook/wait_exec_hook_handler_test.go +++ b/internal/hook/wait_exec_hook_handler_test.go @@ -209,10 +209,10 @@ func TestWaitExecHandleHooks(t *testing.T) { Result(), }, }, - expectedErrors: []error{errors.New("pod hook error")}, + expectedErrors: []error{errors.New("hook in container container1 failed to execute, err: pod hook error")}, }, { - name: "should return no error when hook from annotation fails with on error mode continue", + name: "should return error when hook from annotation fails with on error mode continue", initialPod: builder.ForPod("default", "my-pod"). ObjectMeta(builder.WithAnnotations( podRestoreHookCommandAnnotationKey, "/usr/bin/foo", @@ -278,7 +278,7 @@ func TestWaitExecHandleHooks(t *testing.T) { Result(), }, }, - expectedErrors: nil, + expectedErrors: []error{errors.New("hook in container container1 failed to execute, err: pod hook error")}, }, { name: "should return no error when hook from annotation executes after 10ms wait for container to start", @@ -422,7 +422,7 @@ func TestWaitExecHandleHooks(t *testing.T) { }, }, { - name: "should return no error when spec hook with wait timeout expires with OnError mode Continue", + name: "should return error when spec hook with wait timeout expires with OnError mode Continue", groupResource: "pods", initialPod: builder.ForPod("default", "my-pod"). Containers(&v1.Container{ @@ -435,7 +435,7 @@ func TestWaitExecHandleHooks(t *testing.T) { }, }). Result(), - expectedErrors: nil, + expectedErrors: []error{errors.New("hook my-hook-1 in container container1 in pod default/my-pod not executed: context deadline exceeded")}, byContainer: map[string][]PodExecRestoreHook{ "container1": { { @@ -515,8 +515,8 @@ func TestWaitExecHandleHooks(t *testing.T) { sharedHooksContextTimeout: time.Millisecond, }, { - name: "should return no error when shared hooks context is canceled before spec hook with OnError mode Continue executes", - expectedErrors: nil, + name: "should return error when shared hooks context is canceled before spec hook with OnError mode Continue executes", + expectedErrors: []error{errors.New("hook my-hook-1 in container container1 in pod default/my-pod not executed: context deadline exceeded")}, groupResource: "pods", initialPod: builder.ForPod("default", "my-pod"). 
Containers(&v1.Container{ diff --git a/internal/resourcemodifiers/json_merge_patch.go b/internal/resourcemodifiers/json_merge_patch.go new file mode 100644 index 000000000..5082e2cf5 --- /dev/null +++ b/internal/resourcemodifiers/json_merge_patch.go @@ -0,0 +1,45 @@ +package resourcemodifiers + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/yaml" +) + +type JSONMergePatch struct { + PatchData string `json:"patchData,omitempty"` +} + +type JSONMergePatcher struct { + patches []JSONMergePatch +} + +func (p *JSONMergePatcher) Patch(u *unstructured.Unstructured, _ logrus.FieldLogger) (*unstructured.Unstructured, error) { + objBytes, err := u.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("error in marshaling object %s", err) + } + + for _, patch := range p.patches { + patchBytes, err := yaml.YAMLToJSON([]byte(patch.PatchData)) + if err != nil { + return nil, fmt.Errorf("error in converting YAML to JSON %s", err) + } + + objBytes, err = jsonpatch.MergePatch(objBytes, patchBytes) + if err != nil { + return nil, fmt.Errorf("error in applying JSON Patch: %s", err.Error()) + } + } + + updated := &unstructured.Unstructured{} + err = updated.UnmarshalJSON(objBytes) + if err != nil { + return nil, fmt.Errorf("error in unmarshalling modified object %s", err.Error()) + } + + return updated, nil +} diff --git a/internal/resourcemodifiers/json_merge_patch_test.go b/internal/resourcemodifiers/json_merge_patch_test.go new file mode 100644 index 000000000..b7323fc61 --- /dev/null +++ b/internal/resourcemodifiers/json_merge_patch_test.go @@ -0,0 +1,41 @@ +package resourcemodifiers + +import ( + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" +) + +func TestJsonMergePatchFailure(t *testing.T) { + tests := []struct { + name string + data string + }{ + { + name: "patch with bad yaml", + data: "a: b:", + }, + { + name: "patch with bad json", + data: `{"a"::1}`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := runtime.NewScheme() + err := clientgoscheme.AddToScheme(scheme) + assert.NoError(t, err) + pt := &JSONMergePatcher{ + patches: []JSONMergePatch{{PatchData: tt.data}}, + } + + u := &unstructured.Unstructured{} + _, err = pt.Patch(u, logrus.New()) + assert.Error(t, err) + }) + } +} diff --git a/internal/resourcemodifiers/json_patch.go b/internal/resourcemodifiers/json_patch.go new file mode 100644 index 000000000..b5af7c362 --- /dev/null +++ b/internal/resourcemodifiers/json_patch.go @@ -0,0 +1,96 @@ +package resourcemodifiers + +import ( + "errors" + "fmt" + "strconv" + "strings" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +type JSONPatch struct { + Operation string `json:"operation"` + From string `json:"from,omitempty"` + Path string `json:"path"` + Value string `json:"value,omitempty"` +} + +func (p *JSONPatch) ToString() string { + if addQuotes(p.Value) { + return fmt.Sprintf(`{"op": "%s", "from": "%s", "path": "%s", "value": "%s"}`, p.Operation, p.From, p.Path, p.Value) + } + return fmt.Sprintf(`{"op": "%s", "from": "%s", "path": "%s", "value": %s}`, p.Operation, p.From, p.Path, p.Value) +} + +func addQuotes(value string) bool { + if value == "" { + return true + } + // if 
value is null, then don't add quotes + if value == "null" { + return false + } + // if value is a boolean, then don't add quotes + if _, err := strconv.ParseBool(value); err == nil { + return false + } + // if value is a json object or array, then don't add quotes. + if strings.HasPrefix(value, "{") || strings.HasPrefix(value, "[") { + return false + } + // if value is a number, then don't add quotes + if _, err := strconv.ParseFloat(value, 64); err == nil { + return false + } + return true +} + +type JSONPatcher struct { + patches []JSONPatch `yaml:"patches"` +} + +func (p *JSONPatcher) Patch(u *unstructured.Unstructured, logger logrus.FieldLogger) (*unstructured.Unstructured, error) { + modifiedObjBytes, err := p.applyPatch(u) + if err != nil { + if errors.Is(err, jsonpatch.ErrTestFailed) { + logger.Infof("Test operation failed for JSON Patch %s", err.Error()) + return u.DeepCopy(), nil + } + return nil, fmt.Errorf("error in applying JSON Patch %s", err.Error()) + } + + updated := &unstructured.Unstructured{} + err = updated.UnmarshalJSON(modifiedObjBytes) + if err != nil { + return nil, fmt.Errorf("error in unmarshalling modified object %s", err.Error()) + } + + return updated, nil +} + +func (p *JSONPatcher) applyPatch(u *unstructured.Unstructured) ([]byte, error) { + patchBytes := p.patchArrayToByteArray() + jsonPatch, err := jsonpatch.DecodePatch(patchBytes) + if err != nil { + return nil, fmt.Errorf("error in decoding json patch %s", err.Error()) + } + + objBytes, err := u.MarshalJSON() + if err != nil { + return nil, fmt.Errorf("error in marshaling object %s", err.Error()) + } + + return jsonPatch.Apply(objBytes) +} + +func (p *JSONPatcher) patchArrayToByteArray() []byte { + var patches []string + for _, patch := range p.patches { + patches = append(patches, patch.ToString()) + } + patchesStr := strings.Join(patches, ",\n\t") + return []byte(fmt.Sprintf(`[%s]`, patchesStr)) +} diff --git a/internal/resourcemodifiers/resource_modifiers.go b/internal/resourcemodifiers/resource_modifiers.go index b5d3047ef..d4ae93e71 100644 --- a/internal/resourcemodifiers/resource_modifiers.go +++ b/internal/resourcemodifiers/resource_modifiers.go @@ -18,16 +18,16 @@ package resourcemodifiers import ( "fmt" "regexp" - "strconv" - "strings" jsonpatch "github.com/evanphx/json-patch" + "github.com/gobwas/glob" "github.com/pkg/errors" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/yaml" "github.com/vmware-tanzu/velero/pkg/util/collections" @@ -38,11 +38,9 @@ const ( ResourceModifierSupportedVersionV1 = "v1" ) -type JSONPatch struct { - Operation string `json:"operation"` - From string `json:"from,omitempty"` - Path string `json:"path"` - Value string `json:"value,omitempty"` +type MatchRule struct { + Path string `json:"path,omitempty"` + Value string `json:"value,omitempty"` } type Conditions struct { @@ -50,11 +48,14 @@ type Conditions struct { GroupResource string `json:"groupResource"` ResourceNameRegex string `json:"resourceNameRegex,omitempty"` LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` + Matches []MatchRule `json:"matches,omitempty"` } type ResourceModifierRule struct { - Conditions Conditions `json:"conditions"` - Patches []JSONPatch `json:"patches"` + Conditions Conditions `json:"conditions"` + Patches []JSONPatch `json:"patches,omitempty"` + MergePatches []JSONMergePatch 
`json:"mergePatches,omitempty"` + StrategicPatches []StrategicMergePatch `json:"strategicPatches,omitempty"` } type ResourceModifiers struct { @@ -83,10 +84,10 @@ func GetResourceModifiersFromConfig(cm *v1.ConfigMap) (*ResourceModifiers, error return resModifiers, nil } -func (p *ResourceModifiers) ApplyResourceModifierRules(obj *unstructured.Unstructured, groupResource string, log logrus.FieldLogger) []error { +func (p *ResourceModifiers) ApplyResourceModifierRules(obj *unstructured.Unstructured, groupResource string, scheme *runtime.Scheme, log logrus.FieldLogger) []error { var errs []error for _, rule := range p.ResourceModifierRules { - err := rule.Apply(obj, groupResource, log) + err := rule.apply(obj, groupResource, scheme, log) if err != nil { errs = append(errs, err) } @@ -95,13 +96,22 @@ func (p *ResourceModifiers) ApplyResourceModifierRules(obj *unstructured.Unstruc return errs } -func (r *ResourceModifierRule) Apply(obj *unstructured.Unstructured, groupResource string, log logrus.FieldLogger) error { - namespaceInclusion := collections.NewIncludesExcludes().Includes(r.Conditions.Namespaces...) - if !namespaceInclusion.ShouldInclude(obj.GetNamespace()) { - return nil +func (r *ResourceModifierRule) apply(obj *unstructured.Unstructured, groupResource string, scheme *runtime.Scheme, log logrus.FieldLogger) error { + ns := obj.GetNamespace() + if ns != "" { + namespaceInclusion := collections.NewIncludesExcludes().Includes(r.Conditions.Namespaces...) + if !namespaceInclusion.ShouldInclude(ns) { + return nil + } } - if r.Conditions.GroupResource != groupResource { + g, err := glob.Compile(r.Conditions.GroupResource, '.') + if err != nil { + log.Errorf("Bad glob pattern of groupResource in condition, groupResource: %s, err: %s", r.Conditions.GroupResource, err) + return err + } + + if !g.Match(groupResource) { return nil } @@ -125,87 +135,82 @@ func (r *ResourceModifierRule) Apply(obj *unstructured.Unstructured, groupResour } } - patches, err := r.PatchArrayToByteArray() + match, err := matchConditions(obj, r.Conditions.Matches, log) if err != nil { return err + } else if !match { + log.Info("Conditions do not match, skip it") + return nil } + log.Infof("Applying resource modifier patch on %s/%s", obj.GetNamespace(), obj.GetName()) - err = ApplyPatch(patches, obj, log) + err = r.applyPatch(obj, scheme, log) if err != nil { return err } return nil } -// PatchArrayToByteArray converts all JsonPatch to string array with the format of jsonpatch.Patch and then convert it to byte array -func (r *ResourceModifierRule) PatchArrayToByteArray() ([]byte, error) { - var patches []string - for _, patch := range r.Patches { - patches = append(patches, patch.ToString()) +func matchConditions(u *unstructured.Unstructured, rules []MatchRule, _ logrus.FieldLogger) (bool, error) { + if len(rules) == 0 { + return true, nil } - patchesStr := strings.Join(patches, ",\n\t") - return []byte(fmt.Sprintf(`[%s]`, patchesStr)), nil -} -func (p *JSONPatch) ToString() string { - if addQuotes(p.Value) { - return fmt.Sprintf(`{"op": "%s", "from": "%s", "path": "%s", "value": "%s"}`, p.Operation, p.From, p.Path, p.Value) - } - return fmt.Sprintf(`{"op": "%s", "from": "%s", "path": "%s", "value": %s}`, p.Operation, p.From, p.Path, p.Value) -} + var fixed []JSONPatch + for _, rule := range rules { + if rule.Path == "" { + return false, fmt.Errorf("path is required for match rule") + } -func ApplyPatch(patch []byte, obj *unstructured.Unstructured, log logrus.FieldLogger) error { - jsonPatch, err := 
jsonpatch.DecodePatch(patch) - if err != nil { - return fmt.Errorf("error in decoding json patch %s", err.Error()) + fixed = append(fixed, JSONPatch{ + Operation: "test", + Path: rule.Path, + Value: rule.Value, + }) } - objBytes, err := obj.MarshalJSON() - if err != nil { - return fmt.Errorf("error in marshaling object %s", err.Error()) - } - modifiedObjBytes, err := jsonPatch.Apply(objBytes) + + p := &JSONPatcher{patches: fixed} + _, err := p.applyPatch(u) if err != nil { if errors.Is(err, jsonpatch.ErrTestFailed) { - log.Infof("Test operation failed for JSON Patch %s", err.Error()) - return nil + return false, nil } - return fmt.Errorf("error in applying JSON Patch %s", err.Error()) + return false, err } - err = obj.UnmarshalJSON(modifiedObjBytes) - if err != nil { - return fmt.Errorf("error in unmarshalling modified object %s", err.Error()) - } - return nil + + return true, nil } func unmarshalResourceModifiers(yamlData []byte) (*ResourceModifiers, error) { resModifiers := &ResourceModifiers{} err := yaml.UnmarshalStrict(yamlData, resModifiers) if err != nil { - return nil, fmt.Errorf("failed to decode yaml data into resource modifiers %v", err) + return nil, fmt.Errorf("failed to decode yaml data into resource modifiers, err: %s", err) } return resModifiers, nil } -func addQuotes(value string) bool { - if value == "" { - return true - } - // if value is null, then don't add quotes - if value == "null" { - return false - } - // if value is a boolean, then don't add quotes - if _, err := strconv.ParseBool(value); err == nil { - return false - } - // if value is a json object or array, then don't add quotes. - if strings.HasPrefix(value, "{") || strings.HasPrefix(value, "[") { - return false - } - // if value is a number, then don't add quotes - if _, err := strconv.ParseFloat(value, 64); err == nil { - return false - } - return true +type patcher interface { + Patch(u *unstructured.Unstructured, logger logrus.FieldLogger) (*unstructured.Unstructured, error) +} + +func (r *ResourceModifierRule) applyPatch(u *unstructured.Unstructured, scheme *runtime.Scheme, logger logrus.FieldLogger) error { + var p patcher + if len(r.Patches) > 0 { + p = &JSONPatcher{patches: r.Patches} + } else if len(r.MergePatches) > 0 { + p = &JSONMergePatcher{patches: r.MergePatches} + } else if len(r.StrategicPatches) > 0 { + p = &StrategicMergePatcher{patches: r.StrategicPatches, scheme: scheme} + } else { + return fmt.Errorf("no patch data found") + } + + updated, err := p.Patch(u, logger) + if err != nil { + return fmt.Errorf("error in applying patch %s", err) + } + + u.SetUnstructuredContent(updated.Object) + return nil } diff --git a/internal/resourcemodifiers/resource_modifiers_test.go b/internal/resourcemodifiers/resource_modifiers_test.go index a8e61dae4..b09b473d9 100644 --- a/internal/resourcemodifiers/resource_modifiers_test.go +++ b/internal/resourcemodifiers/resource_modifiers_test.go @@ -24,6 +24,10 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/yaml" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" ) func TestGetResourceModifiersFromConfig(t *testing.T) { @@ -131,6 +135,128 @@ func TestGetResourceModifiersFromConfig(t *testing.T) { }, } + cm5 := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-configmap", + Namespace: "test-namespace", + }, + Data: 
map[string]string{ + "sub.yml": "version: v1\nresourceModifierRules:\n- conditions:\n groupResource: pods\n namespaces:\n - ns1\n matches:\n - path: /metadata/annotations/foo\n value: bar\n mergePatches:\n - patchData: |\n metadata:\n annotations:\n foo: null", + }, + } + + rules5 := &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "pods", + Namespaces: []string{ + "ns1", + }, + Matches: []MatchRule{ + { + Path: "/metadata/annotations/foo", + Value: "bar", + }, + }, + }, + MergePatches: []JSONMergePatch{ + { + PatchData: "metadata:\n annotations:\n foo: null", + }, + }, + }, + }, + } + + cm6 := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-configmap", + Namespace: "test-namespace", + }, + Data: map[string]string{ + "sub.yml": "version: v1\nresourceModifierRules:\n- conditions:\n groupResource: pods\n namespaces:\n - ns1\n strategicPatches:\n - patchData: |\n spec:\n containers:\n - name: nginx\n image: repo2/nginx", + }, + } + + rules6 := &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "pods", + Namespaces: []string{ + "ns1", + }, + }, + StrategicPatches: []StrategicMergePatch{ + { + PatchData: "spec:\n containers:\n - name: nginx\n image: repo2/nginx", + }, + }, + }, + }, + } + + cm7 := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-configmap", + Namespace: "test-namespace", + }, + Data: map[string]string{ + "sub.yml": "version: v1\nresourceModifierRules:\n- conditions:\n groupResource: pods\n namespaces:\n - ns1\n mergePatches:\n - patchData: |\n {\"metadata\":{\"annotations\":{\"foo\":null}}}", + }, + } + + rules7 := &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "pods", + Namespaces: []string{ + "ns1", + }, + }, + MergePatches: []JSONMergePatch{ + { + PatchData: `{"metadata":{"annotations":{"foo":null}}}`, + }, + }, + }, + }, + } + + cm8 := &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-configmap", + Namespace: "test-namespace", + }, + Data: map[string]string{ + "sub.yml": "version: v1\nresourceModifierRules:\n- conditions:\n groupResource: pods\n namespaces:\n - ns1\n strategicPatches:\n - patchData: |\n {\"spec\":{\"containers\":[{\"name\": \"nginx\",\"image\": \"repo2/nginx\"}]}}", + }, + } + + rules8 := &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "pods", + Namespaces: []string{ + "ns1", + }, + }, + StrategicPatches: []StrategicMergePatch{ + { + PatchData: `{"spec":{"containers":[{"name": "nginx","image": "repo2/nginx"}]}}`, + }, + }, + }, + }, + } + type args struct { cm *v1.ConfigMap } @@ -180,6 +306,38 @@ func TestGetResourceModifiersFromConfig(t *testing.T) { want: nil, wantErr: true, }, + { + name: "complex yaml data with json merge patch", + args: args{ + cm: cm5, + }, + want: rules5, + wantErr: false, + }, + { + name: "complex yaml data with strategic merge patch", + args: args{ + cm: cm6, + }, + want: rules6, + wantErr: false, + }, + { + name: "complex json data with json merge patch", + args: args{ + cm: cm7, + }, + want: rules7, + wantErr: false, + }, + { + name: "complex json data with strategic merge patch", + args: args{ + cm: cm8, + }, + want: rules8, + wantErr: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -502,6 +660,38 @@ func 
TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) { wantErr: false, wantObj: deployNginxTwoReplica.DeepCopy(), }, + { + name: "nginx deployment: Empty Resource Regex", + fields: fields{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "deployments.apps", + Namespaces: []string{"foo"}, + }, + Patches: []JSONPatch{ + { + Operation: "test", + Path: "/spec/replicas", + Value: "1", + }, + { + Operation: "replace", + Path: "/spec/replicas", + Value: "2", + }, + }, + }, + }, + }, + args: args{ + obj: deployNginxOneReplica.DeepCopy(), + groupResource: "deployments.apps", + }, + wantErr: false, + wantObj: deployNginxTwoReplica.DeepCopy(), + }, { name: "nginx deployment: Empty Resource Regex and namespaces list", fields: fields{ @@ -719,7 +909,7 @@ func TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) { Version: tt.fields.Version, ResourceModifierRules: tt.fields.ResourceModifierRules, } - got := p.ApplyResourceModifierRules(tt.args.obj, tt.args.groupResource, logrus.New()) + got := p.ApplyResourceModifierRules(tt.args.obj, tt.args.groupResource, nil, logrus.New()) assert.Equal(t, tt.wantErr, len(got) > 0) assert.Equal(t, *tt.wantObj, *tt.args.obj) @@ -727,6 +917,633 @@ func TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) { } } +var podYAMLWithNginxImage = ` +apiVersion: v1 +kind: Pod +metadata: + name: pod1 + namespace: fake +spec: + containers: + - image: nginx + name: nginx +` + +var podYAMLWithNginx1Image = ` +apiVersion: v1 +kind: Pod +metadata: + name: pod1 + namespace: fake +spec: + containers: + - image: nginx1 + name: nginx +` + +var podYAMLWithNFSVolume = ` +apiVersion: v1 +kind: Pod +metadata: + name: pod1 + namespace: fake +spec: + containers: + - image: fake + name: fake + volumeMounts: + - mountPath: /fake1 + name: vol1 + - mountPath: /fake2 + name: vol2 + volumes: + - name: vol1 + nfs: + path: /fake2 + - name: vol2 + emptyDir: {} +` + +var podYAMLWithPVCVolume = ` +apiVersion: v1 +kind: Pod +metadata: + name: pod1 + namespace: fake +spec: + containers: + - image: fake + name: fake + volumeMounts: + - mountPath: /fake1 + name: vol1 + - mountPath: /fake2 + name: vol2 + volumes: + - name: vol1 + persistentVolumeClaim: + claimName: pvc1 + - name: vol2 + emptyDir: {} +` + +var svcYAMLWithPort8000 = ` +apiVersion: v1 +kind: Service +metadata: + name: svc1 + namespace: fake +spec: + ports: + - name: fake1 + port: 8001 + protocol: TCP + targetPort: 8001 + - name: fake + port: 8000 + protocol: TCP + targetPort: 8000 + - name: fake2 + port: 8002 + protocol: TCP + targetPort: 8002 +` + +var svcYAMLWithPort9000 = ` +apiVersion: v1 +kind: Service +metadata: + name: svc1 + namespace: fake +spec: + ports: + - name: fake1 + port: 8001 + protocol: TCP + targetPort: 8001 + - name: fake + port: 9000 + protocol: TCP + targetPort: 9000 + - name: fake2 + port: 8002 + protocol: TCP + targetPort: 8002 +` + +var cmYAMLWithLabelAToB = ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 + namespace: fake + labels: + a: b + c: d +` + +var cmYAMLWithLabelAToC = ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 + namespace: fake + labels: + a: c + c: d +` + +var cmYAMLWithoutLabelA = ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: cm1 + namespace: fake + labels: + c: d +` + +func TestResourceModifiers_ApplyResourceModifierRules_StrategicMergePatch(t *testing.T) { + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + unstructuredSerializer := 
yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) + o1, _, err := unstructuredSerializer.Decode([]byte(podYAMLWithNFSVolume), nil, nil) + assert.NoError(t, err) + podWithNFSVolume := o1.(*unstructured.Unstructured) + + o2, _, err := unstructuredSerializer.Decode([]byte(podYAMLWithPVCVolume), nil, nil) + assert.NoError(t, err) + podWithPVCVolume := o2.(*unstructured.Unstructured) + + o3, _, err := unstructuredSerializer.Decode([]byte(svcYAMLWithPort8000), nil, nil) + assert.NoError(t, err) + svcWithPort8000 := o3.(*unstructured.Unstructured) + + o4, _, err := unstructuredSerializer.Decode([]byte(svcYAMLWithPort9000), nil, nil) + assert.NoError(t, err) + svcWithPort9000 := o4.(*unstructured.Unstructured) + + o5, _, err := unstructuredSerializer.Decode([]byte(podYAMLWithNginxImage), nil, nil) + assert.NoError(t, err) + podWithNginxImage := o5.(*unstructured.Unstructured) + + o6, _, err := unstructuredSerializer.Decode([]byte(podYAMLWithNginx1Image), nil, nil) + assert.NoError(t, err) + podWithNginx1Image := o6.(*unstructured.Unstructured) + + tests := []struct { + name string + rm *ResourceModifiers + obj *unstructured.Unstructured + groupResource string + wantErr bool + wantObj *unstructured.Unstructured + }{ + { + name: "update image", + rm: &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "pods", + Namespaces: []string{"fake"}, + }, + StrategicPatches: []StrategicMergePatch{ + { + PatchData: `{"spec":{"containers":[{"name":"nginx","image":"nginx1"}]}}`, + }, + }, + }, + }, + }, + obj: podWithNginxImage.DeepCopy(), + groupResource: "pods", + wantErr: false, + wantObj: podWithNginx1Image.DeepCopy(), + }, + { + name: "update image with yaml format", + rm: &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "pods", + Namespaces: []string{"fake"}, + }, + StrategicPatches: []StrategicMergePatch{ + { + PatchData: `spec: + containers: + - name: nginx + image: nginx1`, + }, + }, + }, + }, + }, + obj: podWithNginxImage.DeepCopy(), + groupResource: "pods", + wantErr: false, + wantObj: podWithNginx1Image.DeepCopy(), + }, + { + name: "replace nfs with pvc in volume", + rm: &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "pods", + Namespaces: []string{"fake"}, + }, + StrategicPatches: []StrategicMergePatch{ + { + PatchData: `{"spec":{"volumes":[{"nfs":null,"name":"vol1","persistentVolumeClaim":{"claimName":"pvc1"}}]}}`, + }, + }, + }, + }, + }, + obj: podWithNFSVolume.DeepCopy(), + groupResource: "pods", + wantErr: false, + wantObj: podWithPVCVolume.DeepCopy(), + }, + { + name: "replace any other volume source with pvc in volume", + rm: &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "pods", + Namespaces: []string{"fake"}, + }, + StrategicPatches: []StrategicMergePatch{ + { + PatchData: `{"spec":{"volumes":[{"$retainKeys":["name","persistentVolumeClaim"],"name":"vol1","persistentVolumeClaim":{"claimName":"pvc1"}}]}}`, + }, + }, + }, + }, + }, + obj: podWithNFSVolume.DeepCopy(), + groupResource: "pods", + wantErr: false, + wantObj: podWithPVCVolume.DeepCopy(), + }, + { + name: "update a service port", + rm: &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "services", + 
Namespaces: []string{"fake"}, + }, + StrategicPatches: []StrategicMergePatch{ + { + PatchData: `{"spec":{"$setElementOrder/ports":[{"port":8001},{"port":9000},{"port":8002}],"ports":[{"name":"fake","port":9000,"protocol":"TCP","targetPort":9000},{"$patch":"delete","port":8000}]}}`, + }, + }, + }, + }, + }, + obj: svcWithPort8000.DeepCopy(), + groupResource: "services", + wantErr: false, + wantObj: svcWithPort9000.DeepCopy(), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.rm.ApplyResourceModifierRules(tt.obj, tt.groupResource, scheme, logrus.New()) + + assert.Equal(t, tt.wantErr, len(got) > 0) + assert.Equal(t, *tt.wantObj, *tt.obj) + }) + } +} + +func TestResourceModifiers_ApplyResourceModifierRules_JSONMergePatch(t *testing.T) { + unstructuredSerializer := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) + o1, _, err := unstructuredSerializer.Decode([]byte(cmYAMLWithLabelAToB), nil, nil) + assert.NoError(t, err) + cmWithLabelAToB := o1.(*unstructured.Unstructured) + + o2, _, err := unstructuredSerializer.Decode([]byte(cmYAMLWithLabelAToC), nil, nil) + assert.NoError(t, err) + cmWithLabelAToC := o2.(*unstructured.Unstructured) + + o3, _, err := unstructuredSerializer.Decode([]byte(cmYAMLWithoutLabelA), nil, nil) + assert.NoError(t, err) + cmWithoutLabelA := o3.(*unstructured.Unstructured) + + tests := []struct { + name string + rm *ResourceModifiers + obj *unstructured.Unstructured + groupResource string + wantErr bool + wantObj *unstructured.Unstructured + }{ + { + name: "update labels", + rm: &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "configmaps", + Namespaces: []string{"fake"}, + }, + MergePatches: []JSONMergePatch{ + { + PatchData: `{"metadata":{"labels":{"a":"c"}}}`, + }, + }, + }, + }, + }, + obj: cmWithLabelAToB.DeepCopy(), + groupResource: "configmaps", + wantErr: false, + wantObj: cmWithLabelAToC.DeepCopy(), + }, + { + name: "update labels in yaml format", + rm: &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "configmaps", + Namespaces: []string{"fake"}, + }, + MergePatches: []JSONMergePatch{ + { + PatchData: `metadata: + labels: + a: c`, + }, + }, + }, + }, + }, + obj: cmWithLabelAToB.DeepCopy(), + groupResource: "configmaps", + wantErr: false, + wantObj: cmWithLabelAToC.DeepCopy(), + }, + { + name: "delete labels", + rm: &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "configmaps", + Namespaces: []string{"fake"}, + }, + MergePatches: []JSONMergePatch{ + { + PatchData: `{"metadata":{"labels":{"a":null}}}`, + }, + }, + }, + }, + }, + obj: cmWithLabelAToB.DeepCopy(), + groupResource: "configmaps", + wantErr: false, + wantObj: cmWithoutLabelA.DeepCopy(), + }, + { + name: "add labels", + rm: &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "configmaps", + Namespaces: []string{"fake"}, + }, + MergePatches: []JSONMergePatch{ + { + PatchData: `{"metadata":{"labels":{"a":"b"}}}`, + }, + }, + }, + }, + }, + obj: cmWithoutLabelA.DeepCopy(), + groupResource: "configmaps", + wantErr: false, + wantObj: cmWithLabelAToB.DeepCopy(), + }, + { + name: "delete non-existing labels", + rm: &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + 
GroupResource: "configmaps", + Namespaces: []string{"fake"}, + }, + MergePatches: []JSONMergePatch{ + { + PatchData: `{"metadata":{"labels":{"a":null}}}`, + }, + }, + }, + }, + }, + obj: cmWithoutLabelA.DeepCopy(), + groupResource: "configmaps", + wantErr: false, + wantObj: cmWithoutLabelA.DeepCopy(), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.rm.ApplyResourceModifierRules(tt.obj, tt.groupResource, nil, logrus.New()) + + assert.Equal(t, tt.wantErr, len(got) > 0) + assert.Equal(t, *tt.wantObj, *tt.obj) + }) + } +} + +func TestResourceModifiers_wildcard_in_GroupResource(t *testing.T) { + unstructuredSerializer := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) + o1, _, err := unstructuredSerializer.Decode([]byte(cmYAMLWithLabelAToB), nil, nil) + assert.NoError(t, err) + cmWithLabelAToB := o1.(*unstructured.Unstructured) + + o2, _, err := unstructuredSerializer.Decode([]byte(cmYAMLWithLabelAToC), nil, nil) + assert.NoError(t, err) + cmWithLabelAToC := o2.(*unstructured.Unstructured) + + tests := []struct { + name string + rm *ResourceModifiers + obj *unstructured.Unstructured + groupResource string + wantErr bool + wantObj *unstructured.Unstructured + }{ + { + name: "match all groups and resources", + rm: &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "*", + Namespaces: []string{"fake"}, + }, + MergePatches: []JSONMergePatch{ + { + PatchData: `{"metadata":{"labels":{"a":"c"}}}`, + }, + }, + }, + }, + }, + obj: cmWithLabelAToB.DeepCopy(), + groupResource: "configmaps", + wantErr: false, + wantObj: cmWithLabelAToC.DeepCopy(), + }, + { + name: "match all resources in group apps", + rm: &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "*.apps", + Namespaces: []string{"fake"}, + }, + MergePatches: []JSONMergePatch{ + { + PatchData: `{"metadata":{"labels":{"a":"c"}}}`, + }, + }, + }, + }, + }, + obj: cmWithLabelAToB.DeepCopy(), + groupResource: "fake.apps", + wantErr: false, + wantObj: cmWithLabelAToC.DeepCopy(), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.rm.ApplyResourceModifierRules(tt.obj, tt.groupResource, nil, logrus.New()) + + assert.Equal(t, tt.wantErr, len(got) > 0) + assert.Equal(t, *tt.wantObj, *tt.obj) + }) + } +} + +func TestResourceModifiers_conditional_patches(t *testing.T) { + unstructuredSerializer := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) + o1, _, err := unstructuredSerializer.Decode([]byte(cmYAMLWithLabelAToB), nil, nil) + assert.NoError(t, err) + cmWithLabelAToB := o1.(*unstructured.Unstructured) + + o2, _, err := unstructuredSerializer.Decode([]byte(cmYAMLWithLabelAToC), nil, nil) + assert.NoError(t, err) + cmWithLabelAToC := o2.(*unstructured.Unstructured) + + tests := []struct { + name string + rm *ResourceModifiers + obj *unstructured.Unstructured + groupResource string + wantErr bool + wantObj *unstructured.Unstructured + }{ + { + name: "match conditions and apply patches", + rm: &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "*", + Namespaces: []string{"fake"}, + Matches: []MatchRule{ + { + Path: "/metadata/labels/a", + Value: "b", + }, + }, + }, + MergePatches: []JSONMergePatch{ + { + PatchData: `{"metadata":{"labels":{"a":"c"}}}`, + }, + }, + }, + }, + }, + obj: 
cmWithLabelAToB.DeepCopy(), + groupResource: "configmaps", + wantErr: false, + wantObj: cmWithLabelAToC.DeepCopy(), + }, + { + name: "mismatch conditions and skip patches", + rm: &ResourceModifiers{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "*", + Namespaces: []string{"fake"}, + Matches: []MatchRule{ + { + Path: "/metadata/labels/a", + Value: "c", + }, + }, + }, + MergePatches: []JSONMergePatch{ + { + PatchData: `{"metadata":{"labels":{"a":"c"}}}`, + }, + }, + }, + }, + }, + obj: cmWithLabelAToB.DeepCopy(), + groupResource: "configmaps", + wantErr: false, + wantObj: cmWithLabelAToB.DeepCopy(), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.rm.ApplyResourceModifierRules(tt.obj, tt.groupResource, nil, logrus.New()) + + assert.Equal(t, tt.wantErr, len(got) > 0) + assert.Equal(t, *tt.wantObj, *tt.obj) + }) + } +} + func TestJSONPatch_ToString(t *testing.T) { type fields struct { Operation string diff --git a/internal/resourcemodifiers/resource_modifiers_validator.go b/internal/resourcemodifiers/resource_modifiers_validator.go index aca3fe923..6742ef8c7 100644 --- a/internal/resourcemodifiers/resource_modifiers_validator.go +++ b/internal/resourcemodifiers/resource_modifiers_validator.go @@ -24,6 +24,21 @@ func (r *ResourceModifierRule) Validate() error { if err := r.Conditions.Validate(); err != nil { return err } + + count := 0 + for _, size := range []int{ + len(r.Patches), + len(r.MergePatches), + len(r.StrategicPatches), + } { + if size != 0 { + count++ + } + if count >= 2 { + return fmt.Errorf("only one of patches, mergePatches, strategicPatches can be specified") + } + } + for _, patch := range r.Patches { if err := patch.Validate(); err != nil { return err diff --git a/internal/resourcemodifiers/resource_modifiers_validator_test.go b/internal/resourcemodifiers/resource_modifiers_validator_test.go index 4011ba0d2..4c31e929a 100644 --- a/internal/resourcemodifiers/resource_modifiers_validator_test.go +++ b/internal/resourcemodifiers/resource_modifiers_validator_test.go @@ -129,6 +129,32 @@ func TestResourceModifiers_Validate(t *testing.T) { }, wantErr: true, }, + { + name: "More than one patch type in a rule", + fields: fields{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "*", + }, + Patches: []JSONPatch{ + { + Operation: "test", + Path: "/spec/storageClassName", + Value: "premium", + }, + }, + MergePatches: []JSONMergePatch{ + { + PatchData: `{"metadata":{"labels":{"a":null}}}`, + }, + }, + }, + }, + }, + wantErr: true, + }, } for _, tt := range tests { diff --git a/internal/resourcemodifiers/strategic_merge_patch.go b/internal/resourcemodifiers/strategic_merge_patch.go new file mode 100644 index 000000000..8d3fe4c51 --- /dev/null +++ b/internal/resourcemodifiers/strategic_merge_patch.go @@ -0,0 +1,143 @@ +package resourcemodifiers + +import ( + "fmt" + "net/http" + + "github.com/sirupsen/logrus" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/mergepatch" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/validation/field" + kubejson "sigs.k8s.io/json" + "sigs.k8s.io/yaml" +) + +type StrategicMergePatch struct { + PatchData string `json:"patchData,omitempty"` +} + +type StrategicMergePatcher struct { + patches 
[]StrategicMergePatch + scheme *runtime.Scheme +} + +func (p *StrategicMergePatcher) Patch(u *unstructured.Unstructured, _ logrus.FieldLogger) (*unstructured.Unstructured, error) { + gvk := u.GetObjectKind().GroupVersionKind() + schemaReferenceObj, err := p.scheme.New(gvk) + if err != nil { + return nil, err + } + + origin := u.DeepCopy() + updated := u.DeepCopy() + for _, patch := range p.patches { + patchBytes, err := yaml.YAMLToJSON([]byte(patch.PatchData)) + if err != nil { + return nil, fmt.Errorf("error in converting YAML to JSON %s", err) + } + + err = strategicPatchObject(origin, patchBytes, updated, schemaReferenceObj) + if err != nil { + return nil, fmt.Errorf("error in applying Strategic Patch %s", err.Error()) + } + + origin = updated.DeepCopy() + } + + return updated, nil +} + +// strategicPatchObject applies a strategic merge patch of `patchBytes` to +// `originalObject` and stores the result in `objToUpdate`. +// NOTE: Both `originalObject` and `objToUpdate` are supposed to be versioned. +func strategicPatchObject( + originalObject runtime.Object, + patchBytes []byte, + objToUpdate runtime.Object, + schemaReferenceObj runtime.Object, +) error { + originalObjMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(originalObject) + if err != nil { + return err + } + + patchMap := make(map[string]interface{}) + var strictErrs []error + strictErrs, err = kubejson.UnmarshalStrict(patchBytes, &patchMap) + if err != nil { + return apierrors.NewBadRequest(err.Error()) + } + + if err := applyPatchToObject(originalObjMap, patchMap, objToUpdate, schemaReferenceObj, strictErrs); err != nil { + return err + } + return nil +} + +// applyPatchToObject applies a strategic merge patch of `patchMap` to +// `originalMap` and stores the result in `objToUpdate`. +// NOTE: `objToUpdate` must be a versioned object. +func applyPatchToObject( + originalMap map[string]interface{}, + patchMap map[string]interface{}, + objToUpdate runtime.Object, + schemaReferenceObj runtime.Object, + strictErrs []error, +) error { + patchedObjMap, err := strategicpatch.StrategicMergeMapPatch(originalMap, patchMap, schemaReferenceObj) + if err != nil { + return interpretStrategicMergePatchError(err) + } + + // Rather than serialize the patched map to JSON, then decode it to an object, we go directly from a map to an object + converter := runtime.DefaultUnstructuredConverter + if err := converter.FromUnstructuredWithValidation(patchedObjMap, objToUpdate, true); err != nil { + strictError, isStrictError := runtime.AsStrictDecodingError(err) + switch { + case !isStrictError: + // disregard any strictErrs, because it's an incomplete + // list of strict errors given that we don't know what fields were + // unknown because StrategicMergeMapPatch failed. + // Non-strict errors trump in this case.
+ return apierrors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{ + field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), err.Error()), + }) + //case validationDirective == metav1.FieldValidationWarn: + // addStrictDecodingWarnings(requestContext, append(strictErrs, strictError.Errors()...)) + default: + strictDecodingError := runtime.NewStrictDecodingError(append(strictErrs, strictError.Errors()...)) + return apierrors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{ + field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), strictDecodingError.Error()), + }) + } + } else if len(strictErrs) > 0 { + switch { + //case validationDirective == metav1.FieldValidationWarn: + // addStrictDecodingWarnings(requestContext, strictErrs) + default: + return apierrors.NewInvalid(schema.GroupKind{}, "", field.ErrorList{ + field.Invalid(field.NewPath("patch"), fmt.Sprintf("%+v", patchMap), runtime.NewStrictDecodingError(strictErrs).Error()), + }) + } + } + + return nil +} + +// interpretStrategicMergePatchError interprets the error type and returns an error with appropriate HTTP code. +func interpretStrategicMergePatchError(err error) error { + switch err { + case mergepatch.ErrBadJSONDoc, mergepatch.ErrBadPatchFormatForPrimitiveList, mergepatch.ErrBadPatchFormatForRetainKeys, mergepatch.ErrBadPatchFormatForSetElementOrderList, mergepatch.ErrUnsupportedStrategicMergePatchFormat: + return apierrors.NewBadRequest(err.Error()) + case mergepatch.ErrNoListOfLists, mergepatch.ErrPatchContentNotMatchRetainKeys: + return apierrors.NewGenericServerResponse(http.StatusUnprocessableEntity, "", schema.GroupResource{}, "", err.Error(), 0, false) + default: + return err + } +} diff --git a/internal/resourcemodifiers/strategic_merge_patch_test.go b/internal/resourcemodifiers/strategic_merge_patch_test.go new file mode 100644 index 000000000..6c3c700f5 --- /dev/null +++ b/internal/resourcemodifiers/strategic_merge_patch_test.go @@ -0,0 +1,52 @@ +package resourcemodifiers + +import ( + "testing" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" +) + +func TestStrategicMergePatchFailure(t *testing.T) { + tests := []struct { + name string + data string + kind string + }{ + { + name: "patch with unknown kind", + data: "{}", + kind: "BadKind", + }, + { + name: "patch with bad yaml", + data: "a: b:", + kind: "Pod", + }, + { + name: "patch with bad json", + data: `{"a"::1}`, + kind: "Pod", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := runtime.NewScheme() + err := clientgoscheme.AddToScheme(scheme) + assert.NoError(t, err) + pt := &StrategicMergePatcher{ + patches: []StrategicMergePatch{{PatchData: tt.data}}, + scheme: scheme, + } + + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: tt.kind}) + _, err = pt.Patch(u, logrus.New()) + assert.Error(t, err) + }) + } +} diff --git a/internal/storage/storagelocation_test.go b/internal/storage/storagelocation_test.go index bd5a94aa9..650e7338a 100644 --- a/internal/storage/storagelocation_test.go +++ b/internal/storage/storagelocation_test.go @@ -26,8 +26,8 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" - "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" 
velerotest "github.com/vmware-tanzu/velero/pkg/test" + "github.com/vmware-tanzu/velero/pkg/util" ) func TestIsReadyToValidate(t *testing.T) { @@ -163,7 +163,7 @@ func TestListBackupStorageLocations(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - client := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(tt.backupLocations).Build() + client := fake.NewClientBuilder().WithScheme(util.VeleroScheme).WithRuntimeObjects(tt.backupLocations).Build() if tt.expectError { _, err := ListBackupStorageLocations(context.Background(), client, "ns-1") g.Expect(err).NotTo(BeNil()) diff --git a/netlify.toml b/netlify.toml index d270f9aba..ed2de9787 100644 --- a/netlify.toml +++ b/netlify.toml @@ -1,7 +1,7 @@ [build] base = "site/" command = "hugo --gc --minify" - publish = "public" + publish = "site/public" [context.production.environment] HUGO_VERSION = "0.73.0" diff --git a/pkg/apis/velero/v1/backup_types.go b/pkg/apis/velero/v1/backup_types.go index 888097a0e..628eda393 100644 --- a/pkg/apis/velero/v1/backup_types.go +++ b/pkg/apis/velero/v1/backup_types.go @@ -268,12 +268,12 @@ type ExecHook struct { type HookErrorMode string const ( - // HookErrorModeContinue means that an error from a hook is acceptable, and the backup can - // proceed. + // HookErrorModeContinue means that an error from a hook is acceptable and the backup/restore can + // proceed with the rest of hooks' execution. This backup/restore should be in `PartiallyFailed` status. HookErrorModeContinue HookErrorMode = "Continue" - // HookErrorModeFail means that an error from a hook is problematic, and the backup should be in - // error. + // HookErrorModeFail means that an error from a hook is problematic and Velero should stop executing following hooks. + // This backup/restore should be in `PartiallyFailed` status. 
HookErrorModeFail HookErrorMode = "Fail" ) diff --git a/pkg/apis/velero/v2alpha1/data_download_types.go b/pkg/apis/velero/v2alpha1/data_download_types.go index bd18a815a..17fe40a26 100644 --- a/pkg/apis/velero/v2alpha1/data_download_types.go +++ b/pkg/apis/velero/v2alpha1/data_download_types.go @@ -131,6 +131,7 @@ type DataDownloadStatus struct { // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this DataDownload was created" // +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the DataDownload is processed" +// DataDownload acts as the protocol between data mover plugins and data mover controller for the datamover restore operation type DataDownload struct { metav1.TypeMeta `json:",inline"` diff --git a/pkg/apis/velero/v2alpha1/data_upload_types.go b/pkg/apis/velero/v2alpha1/data_upload_types.go index 1923e844d..4fb822ff4 100644 --- a/pkg/apis/velero/v2alpha1/data_upload_types.go +++ b/pkg/apis/velero/v2alpha1/data_upload_types.go @@ -164,6 +164,7 @@ type DataUploadStatus struct { // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since this DataUpload was created" // +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.node",description="Name of the node where the DataUpload is processed" +// DataUpload acts as the protocol between data mover plugins and data mover controller for the datamover backup operation type DataUpload struct { metav1.TypeMeta `json:",inline"` diff --git a/pkg/backup/backup_test.go b/pkg/backup/backup_test.go index c0642f818..e9c15dbc4 100644 --- a/pkg/backup/backup_test.go +++ b/pkg/backup/backup_test.go @@ -46,6 +46,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/client" "github.com/vmware-tanzu/velero/pkg/discovery" + "github.com/vmware-tanzu/velero/pkg/features" "github.com/vmware-tanzu/velero/pkg/itemoperation" "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" @@ -1379,6 +1380,12 @@ func TestBackupItemActionsForSkippedPV(t *testing.T) { expectNotSkippedPVs: []string{"pv-1"}, }, } + // Enable CSI feature before running the test, because Velero will check whether + // CSI feature is enabled before executing CSI plugin actions. + features.NewFeatureFlagSet("EnableCSI") + defer func() { + features.NewFeatureFlagSet("") + }() for _, tc := range tests { t.Run(tc.name, func(tt *testing.T) { var ( diff --git a/pkg/backup/item_backupper.go b/pkg/backup/item_backupper.go index ddee9a0cb..61f6834d6 100644 --- a/pkg/backup/item_backupper.go +++ b/pkg/backup/item_backupper.go @@ -52,6 +52,7 @@ import ( vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/util/boolptr" + csiutil "github.com/vmware-tanzu/velero/pkg/util/csi" pdvolumeutil "github.com/vmware-tanzu/velero/pkg/util/podvolume" "github.com/vmware-tanzu/velero/pkg/volume" ) @@ -361,6 +362,14 @@ func (ib *itemBackupper) executeActions( ib.trackSkippedPV(obj, groupResource, "", "skipped due to resource policy ", log) continue } + + // If the EnableCSI feature is not enabled, but the executing action is from CSI plugin, skip the action. + if csiutil.ShouldSkipAction(actionName) { + log.Infof("Skip action %s for resource %s:%s/%s, because the CSI feature is not enabled. 
Feature setting is %s.", + actionName, groupResource.String(), metadata.GetNamespace(), metadata.GetName(), features.Serialize()) + continue + } + updatedItem, additionalItemIdentifiers, operationID, postOperationItems, err := action.Execute(obj, ib.backupRequest.Backup) if err != nil { return nil, itemFiles, errors.Wrapf(err, "error executing custom action (groupResource=%s, namespace=%s, name=%s)", groupResource.String(), namespace, name) @@ -652,6 +661,7 @@ func (ib *itemBackupper) getMatchAction(obj runtime.Unstructured, groupResource } return ib.backupRequest.ResPolicies.GetMatchAction(pv) } + return nil, nil } diff --git a/pkg/backup/request.go b/pkg/backup/request.go index e9da4bdde..44bc5578f 100644 --- a/pkg/backup/request.go +++ b/pkg/backup/request.go @@ -20,8 +20,6 @@ import ( "fmt" "sort" - snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" - "github.com/vmware-tanzu/velero/internal/hook" "github.com/vmware-tanzu/velero/internal/resourcepolicies" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" @@ -51,7 +49,6 @@ type Request struct { VolumeSnapshots []*volume.Snapshot PodVolumeBackups []*velerov1api.PodVolumeBackup BackedUpItems map[itemKey]struct{} - CSISnapshots []snapshotv1api.VolumeSnapshot itemOperationsList *[]*itemoperation.BackupOperation ResPolicies *resourcepolicies.Policies SkippedPVTracker *skipPVTracker diff --git a/pkg/backup/snapshots.go b/pkg/backup/snapshots.go new file mode 100644 index 000000000..a5c659705 --- /dev/null +++ b/pkg/backup/snapshots.go @@ -0,0 +1,68 @@ +package backup + +import ( + "context" + + snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1" + "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/util/sets" + kbclient "sigs.k8s.io/controller-runtime/pkg/client" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/features" + "github.com/vmware-tanzu/velero/pkg/label" + "github.com/vmware-tanzu/velero/pkg/util/boolptr" +) + +// Common function to update the status of CSI snapshots +// returns VolumeSnapshot, VolumeSnapshotContent, VolumeSnapshotClasses referenced +func UpdateBackupCSISnapshotsStatus(client kbclient.Client, volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister, backup *velerov1api.Backup, backupLog logrus.FieldLogger) (volumeSnapshots []snapshotv1api.VolumeSnapshot, volumeSnapshotContents []snapshotv1api.VolumeSnapshotContent, volumeSnapshotClasses []snapshotv1api.VolumeSnapshotClass) { + if boolptr.IsSetToTrue(backup.Spec.SnapshotMoveData) { + backupLog.Info("backup SnapshotMoveData is set to true, skip VolumeSnapshot resource persistence.") + } else if features.IsEnabled(velerov1api.CSIFeatureFlag) { + selector := label.NewSelectorForBackup(backup.Name) + vscList := &snapshotv1api.VolumeSnapshotContentList{} + + if volumeSnapshotLister != nil { + tmpVSs, err := volumeSnapshotLister.List(label.NewSelectorForBackup(backup.Name)) + if err != nil { + backupLog.Error(err) + } + for _, vs := range tmpVSs { + volumeSnapshots = append(volumeSnapshots, *vs) + } + } + + err := client.List(context.Background(), vscList, &kbclient.ListOptions{LabelSelector: selector}) + if err != nil { + backupLog.Error(err) + } + if len(vscList.Items) >= 0 { + volumeSnapshotContents = vscList.Items + } + + vsClassSet := sets.NewString() + for index := range volumeSnapshotContents { + 
// persist the volumesnapshotclasses referenced by vsc + if volumeSnapshotContents[index].Spec.VolumeSnapshotClassName != nil && !vsClassSet.Has(*volumeSnapshotContents[index].Spec.VolumeSnapshotClassName) { + vsClass := &snapshotv1api.VolumeSnapshotClass{} + if err := client.Get(context.TODO(), kbclient.ObjectKey{Name: *volumeSnapshotContents[index].Spec.VolumeSnapshotClassName}, vsClass); err != nil { + backupLog.Error(err) + } else { + vsClassSet.Insert(*volumeSnapshotContents[index].Spec.VolumeSnapshotClassName) + volumeSnapshotClasses = append(volumeSnapshotClasses, *vsClass) + } + } + } + backup.Status.CSIVolumeSnapshotsAttempted = len(volumeSnapshots) + csiVolumeSnapshotsCompleted := 0 + for _, vs := range volumeSnapshots { + if vs.Status != nil && boolptr.IsSetToTrue(vs.Status.ReadyToUse) { + csiVolumeSnapshotsCompleted++ + } + } + backup.Status.CSIVolumeSnapshotsCompleted = csiVolumeSnapshotsCompleted + } + return volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses +} diff --git a/pkg/builder/backup_builder.go b/pkg/builder/backup_builder.go index f2c970577..038e75350 100644 --- a/pkg/builder/backup_builder.go +++ b/pkg/builder/backup_builder.go @@ -305,3 +305,9 @@ func (b *BackupBuilder) ParallelFilesUpload(parallel int) *BackupBuilder { b.object.Spec.UploaderConfig.ParallelFilesUpload = parallel return b } + +// WithStatus sets the Backup's status. +func (b *BackupBuilder) WithStatus(status velerov1api.BackupStatus) *BackupBuilder { + b.object.Status = status + return b +} diff --git a/pkg/builder/delete_backup_request_builder.go b/pkg/builder/delete_backup_request_builder.go index 4788795a3..c0dfdbdbf 100644 --- a/pkg/builder/delete_backup_request_builder.go +++ b/pkg/builder/delete_backup_request_builder.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package builder import ( diff --git a/pkg/builder/item_operation_builder.go b/pkg/builder/item_operation_builder.go index 8ca9d7506..210752b82 100644 --- a/pkg/builder/item_operation_builder.go +++ b/pkg/builder/item_operation_builder.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package builder import ( diff --git a/pkg/builder/node_builder.go b/pkg/builder/node_builder.go index 52e2c1e2e..d3b6f51ec 100644 --- a/pkg/builder/node_builder.go +++ b/pkg/builder/node_builder.go @@ -41,6 +41,11 @@ func ForNode(name string) *NodeBuilder { } } +func (b *NodeBuilder) Labels(labels map[string]string) *NodeBuilder { + b.object.Labels = labels + return b +} + // Result returns the built Node. func (b *NodeBuilder) Result() *corev1api.Node { return b.object diff --git a/pkg/builder/node_selector_builder.go b/pkg/builder/node_selector_builder.go index 11ce306b5..558be1657 100644 --- a/pkg/builder/node_selector_builder.go +++ b/pkg/builder/node_selector_builder.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package builder import corev1api "k8s.io/api/core/v1" diff --git a/pkg/builder/pod_builder.go b/pkg/builder/pod_builder.go index 8931c14b9..886d7a411 100644 --- a/pkg/builder/pod_builder.go +++ b/pkg/builder/pod_builder.go @@ -101,3 +101,8 @@ func (b *PodBuilder) ContainerStatuses(containerStatuses ...*corev1api.Container } return b } + +func (b *PodBuilder) Phase(phase corev1api.PodPhase) *PodBuilder { + b.object.Status.Phase = phase + return b +} diff --git a/pkg/builder/pod_volume_restore_builder.go b/pkg/builder/pod_volume_restore_builder.go index c131a0384..3d4da94d6 100644 --- a/pkg/builder/pod_volume_restore_builder.go +++ b/pkg/builder/pod_volume_restore_builder.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package builder import ( diff --git a/pkg/builder/volume_snapshot_builder.go b/pkg/builder/volume_snapshot_builder.go index 19815c0f0..bbaedd16e 100644 --- a/pkg/builder/volume_snapshot_builder.go +++ b/pkg/builder/volume_snapshot_builder.go @@ -67,3 +67,8 @@ func (v *VolumeSnapshotBuilder) BoundVolumeSnapshotContentName(vscName string) * v.object.Status.BoundVolumeSnapshotContentName = &vscName return v } + +func (v *VolumeSnapshotBuilder) SourcePVC(name string) *VolumeSnapshotBuilder { + v.object.Spec.Source.PersistentVolumeClaimName = &name + return v +} diff --git a/pkg/client/factory.go b/pkg/client/factory.go index 5c1ffc545..9ff2040c6 100644 --- a/pkg/client/factory.go +++ b/pkg/client/factory.go @@ -33,7 +33,6 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" velerov2alpha1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" - clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" ) //go:generate mockery --name Factory @@ -42,9 +41,6 @@ import ( type Factory interface { // BindFlags binds common flags (--kubeconfig, --namespace) to the passed-in FlagSet. BindFlags(flags *pflag.FlagSet) - // Client returns a VeleroClient. It uses the following priority to specify the cluster - // configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration. - Client() (clientset.Interface, error) // KubeClient returns a Kubernetes client. It uses the following priority to specify the cluster // configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration. KubeClient() (kubernetes.Interface, error) @@ -115,19 +111,6 @@ func (f *factory) ClientConfig() (*rest.Config, error) { return Config(f.kubeconfig, f.kubecontext, f.baseName, f.clientQPS, f.clientBurst) } -func (f *factory) Client() (clientset.Interface, error) { - clientConfig, err := f.ClientConfig() - if err != nil { - return nil, err - } - - veleroClient, err := clientset.NewForConfig(clientConfig) - if err != nil { - return nil, errors.WithStack(err) - } - return veleroClient, nil -} - func (f *factory) KubeClient() (kubernetes.Interface, error) { clientConfig, err := f.ClientConfig() if err != nil { diff --git a/pkg/client/factory_test.go b/pkg/client/factory_test.go index 547be6bff..50df1c816 100644 --- a/pkg/client/factory_test.go +++ b/pkg/client/factory_test.go @@ -112,11 +112,6 @@ func TestFactory(t *testing.T) { assert.Equal(t, test.burst, clientConfig.Burst) strings.Contains(clientConfig.UserAgent, test.baseName) - client, _ := f.Client() - _, e := client.Discovery().ServerGroups() - assert.Contains(t, e.Error(), fmt.Sprintf("Get \"%s/api?timeout=", test.expectedHost)) - assert.NotNil(t, client) - kubeClient, _ := f.KubeClient() group := kubeClient.NodeV1().RESTClient().APIVersion().Group assert.NotNil(t, kubeClient) diff --git a/pkg/client/mocks/Factory.go b/pkg/client/mocks/Factory.go index 3bd016047..1bec839cd 100644 --- a/pkg/client/mocks/Factory.go +++ b/pkg/client/mocks/Factory.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.30.1. DO NOT EDIT. +// Code generated by mockery v2.20.0. DO NOT EDIT. 
package mocks @@ -13,8 +13,6 @@ import ( pkgclient "sigs.k8s.io/controller-runtime/pkg/client" rest "k8s.io/client-go/rest" - - versioned "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" ) // Factory is an autogenerated mock type for the Factory type @@ -27,32 +25,6 @@ func (_m *Factory) BindFlags(flags *pflag.FlagSet) { _m.Called(flags) } -// Client provides a mock function with given fields: -func (_m *Factory) Client() (versioned.Interface, error) { - ret := _m.Called() - - var r0 versioned.Interface - var r1 error - if rf, ok := ret.Get(0).(func() (versioned.Interface, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() versioned.Interface); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(versioned.Interface) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // ClientConfig provides a mock function with given fields: func (_m *Factory) ClientConfig() (*rest.Config, error) { ret := _m.Called() @@ -212,12 +184,13 @@ func (_m *Factory) SetClientQPS(_a0 float32) { _m.Called(_a0) } -// NewFactory creates a new instance of Factory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewFactory(t interface { +type mockConstructorTestingTNewFactory interface { mock.TestingT Cleanup(func()) -}) *Factory { +} + +// NewFactory creates a new instance of Factory. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewFactory(t mockConstructorTestingTNewFactory) *Factory { mock := &Factory{} mock.Mock.Test(t) diff --git a/pkg/client/retry.go b/pkg/client/retry.go index f9674e1ed..a8f28e8ce 100644 --- a/pkg/client/retry.go +++ b/pkg/client/retry.go @@ -25,20 +25,14 @@ import ( ) func CreateRetryGenerateName(client kbclient.Client, ctx context.Context, obj kbclient.Object) error { - return CreateRetryGenerateNameWithFunc(obj, func() error { - return client.Create(ctx, obj, &kbclient.CreateOptions{}) - }) -} - -func CreateRetryGenerateNameWithFunc(obj kbclient.Object, createFn func() error) error { retryCreateFn := func() error { // needed to ensure that the name from the failed create isn't left on the object between retries obj.SetName("") - return createFn() + return client.Create(ctx, obj, &kbclient.CreateOptions{}) } if obj.GetGenerateName() != "" && obj.GetName() == "" { return retry.OnError(retry.DefaultRetry, apierrors.IsAlreadyExists, retryCreateFn) } else { - return createFn() + return client.Create(ctx, obj, &kbclient.CreateOptions{}) } } diff --git a/pkg/cmd/cli/debug/debug.go b/pkg/cmd/cli/debug/debug.go index 7c55c0f58..f54ee9571 100644 --- a/pkg/cmd/cli/debug/debug.go +++ b/pkg/cmd/cli/debug/debug.go @@ -30,9 +30,12 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/vmware-tanzu/crash-diagnostics/exec" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/clientcmd" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/client" "github.com/vmware-tanzu/velero/pkg/cmd" ) @@ -110,30 +113,32 @@ func (o *option) complete(f client.Factory, fs *pflag.FlagSet) error { } func (o *option) validate(f client.Factory) error { - kubeClient, err := f.KubeClient() + crClient, err := 
f.KubebuilderClient() if err != nil { return err } - l, err := kubeClient.AppsV1().Deployments(o.namespace).List(context.TODO(), metav1.ListOptions{ - LabelSelector: "component=velero", + deploymentList := new(appsv1.DeploymentList) + selector, err := labels.Parse("component=velero") + cmd.CheckError(err) + err = crClient.List(context.TODO(), deploymentList, &ctrlclient.ListOptions{ + Namespace: o.namespace, + LabelSelector: selector, }) if err != nil { return errors.Wrap(err, "failed to check velero deployment") } - if len(l.Items) == 0 { + if len(deploymentList.Items) == 0 { return fmt.Errorf("velero deployment does not exist in namespace: %s", o.namespace) } - veleroClient, err := f.Client() - if err != nil { - return err - } if len(o.backup) > 0 { - if _, err := veleroClient.VeleroV1().Backups(o.namespace).Get(context.TODO(), o.backup, metav1.GetOptions{}); err != nil { + backup := new(velerov1api.Backup) + if err := crClient.Get(context.TODO(), ctrlclient.ObjectKey{Namespace: o.namespace, Name: o.backup}, backup); err != nil { return err } } if len(o.restore) > 0 { - if _, err := veleroClient.VeleroV1().Restores(o.namespace).Get(context.TODO(), o.restore, metav1.GetOptions{}); err != nil { + restore := new(velerov1api.Restore) + if err := crClient.Get(context.TODO(), ctrlclient.ObjectKey{Namespace: o.namespace, Name: o.restore}, restore); err != nil { return err } } diff --git a/pkg/cmd/cli/nodeagent/server.go b/pkg/cmd/cli/nodeagent/server.go index 105e37052..835b899c3 100644 --- a/pkg/cmd/cli/nodeagent/server.go +++ b/pkg/cmd/cli/nodeagent/server.go @@ -19,6 +19,7 @@ package nodeagent import ( "context" "fmt" + "math" "net/http" "os" "strings" @@ -32,6 +33,7 @@ import ( storagev1api "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" @@ -53,7 +55,9 @@ import ( "github.com/vmware-tanzu/velero/pkg/cmd" "github.com/vmware-tanzu/velero/pkg/cmd/util/signals" "github.com/vmware-tanzu/velero/pkg/controller" + "github.com/vmware-tanzu/velero/pkg/datapath" "github.com/vmware-tanzu/velero/pkg/metrics" + "github.com/vmware-tanzu/velero/pkg/nodeagent" "github.com/vmware-tanzu/velero/pkg/repository" "github.com/vmware-tanzu/velero/pkg/util/filesystem" "github.com/vmware-tanzu/velero/pkg/util/logging" @@ -73,6 +77,7 @@ const ( defaultResourceTimeout = 10 * time.Minute defaultDataMoverPrepareTimeout = 30 * time.Minute + defaultDataPathConcurrentNum = 1 ) type nodeAgentServerConfig struct { @@ -132,6 +137,7 @@ type nodeAgentServer struct { config nodeAgentServerConfig kubeClient kubernetes.Interface csiSnapshotClient *snapshotv1client.Clientset + dataPathMgr *datapath.Manager } func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, config nodeAgentServerConfig) (*nodeAgentServer, error) { @@ -219,6 +225,10 @@ func newNodeAgentServer(logger logrus.FieldLogger, factory client.Factory, confi if err != nil { return nil, err } + + dataPathConcurrentNum := s.getDataPathConcurrentNum(defaultDataPathConcurrentNum) + s.dataPathMgr = datapath.NewManager(dataPathConcurrentNum) + return s, nil } @@ -263,24 +273,24 @@ func (s *nodeAgentServer) run() { credentialGetter := &credentials.CredentialGetter{FromFile: credentialFileStore, FromSecret: credSecretStore} repoEnsurer := repository.NewEnsurer(s.mgr.GetClient(), s.logger, s.config.resourceTimeout) - pvbReconciler := 
controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), repoEnsurer, + pvbReconciler := controller.NewPodVolumeBackupReconciler(s.mgr.GetClient(), s.dataPathMgr, repoEnsurer, credentialGetter, s.nodeName, s.mgr.GetScheme(), s.metrics, s.logger) if err := pvbReconciler.SetupWithManager(s.mgr); err != nil { s.logger.Fatal(err, "unable to create controller", "controller", controller.PodVolumeBackup) } - if err = controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), repoEnsurer, credentialGetter, s.logger).SetupWithManager(s.mgr); err != nil { + if err = controller.NewPodVolumeRestoreReconciler(s.mgr.GetClient(), s.dataPathMgr, repoEnsurer, credentialGetter, s.logger).SetupWithManager(s.mgr); err != nil { s.logger.WithError(err).Fatal("Unable to create the pod volume restore controller") } - dataUploadReconciler := controller.NewDataUploadReconciler(s.mgr.GetClient(), s.kubeClient, s.csiSnapshotClient.SnapshotV1(), repoEnsurer, clock.RealClock{}, credentialGetter, s.nodeName, s.fileSystem, s.config.dataMoverPrepareTimeout, s.logger, s.metrics) + dataUploadReconciler := controller.NewDataUploadReconciler(s.mgr.GetClient(), s.kubeClient, s.csiSnapshotClient.SnapshotV1(), s.dataPathMgr, repoEnsurer, clock.RealClock{}, credentialGetter, s.nodeName, s.fileSystem, s.config.dataMoverPrepareTimeout, s.logger, s.metrics) s.markDataUploadsCancel(dataUploadReconciler) if err = dataUploadReconciler.SetupWithManager(s.mgr); err != nil { s.logger.WithError(err).Fatal("Unable to create the data upload controller") } - dataDownloadReconciler := controller.NewDataDownloadReconciler(s.mgr.GetClient(), s.kubeClient, repoEnsurer, credentialGetter, s.nodeName, s.config.dataMoverPrepareTimeout, s.logger, s.metrics) + dataDownloadReconciler := controller.NewDataDownloadReconciler(s.mgr.GetClient(), s.kubeClient, s.dataPathMgr, repoEnsurer, credentialGetter, s.nodeName, s.config.dataMoverPrepareTimeout, s.logger, s.metrics) s.markDataDownloadsCancel(dataDownloadReconciler) if err = dataDownloadReconciler.SetupWithManager(s.mgr); err != nil { s.logger.WithError(err).Fatal("Unable to create the data download controller") @@ -478,3 +488,65 @@ func (s *nodeAgentServer) markInProgressPVRsFailed(client ctrlclient.Client) { s.logger.WithField("podvolumerestore", pvr.GetName()).Warn(pvr.Status.Message) } } + +var getConfigsFunc = nodeagent.GetConfigs + +func (s *nodeAgentServer) getDataPathConcurrentNum(defaultNum int) int { + configs, err := getConfigsFunc(s.ctx, s.namespace, s.kubeClient) + if err != nil { + s.logger.WithError(err).Warn("Failed to get node agent configs") + return defaultNum + } + + if configs == nil || configs.DataPathConcurrency == nil { + s.logger.Infof("Concurrency configs are not found, use the default number %v", defaultNum) + return defaultNum + } + + globalNum := configs.DataPathConcurrency.GlobalConfig + + if globalNum <= 0 { + s.logger.Warnf("Global number %v is invalid, use the default value %v", globalNum, defaultNum) + globalNum = defaultNum + } + + if len(configs.DataPathConcurrency.PerNodeConfig) == 0 { + return globalNum + } + + curNode, err := s.kubeClient.CoreV1().Nodes().Get(s.ctx, s.nodeName, metav1.GetOptions{}) + if err != nil { + s.logger.WithError(err).Warnf("Failed to get node info for %s, use the global number %v", s.nodeName, globalNum) + return globalNum + } + + concurrentNum := math.MaxInt32 + + for _, rule := range configs.DataPathConcurrency.PerNodeConfig { + selector, err := metav1.LabelSelectorAsSelector(&rule.NodeSelector) + if err != nil { + 
s.logger.WithError(err).Warnf("Failed to parse rule with label selector %s, skip it", rule.NodeSelector.String()) + continue + } + + if rule.Number <= 0 { + s.logger.Warnf("Rule with label selector %s is with an invalid number %v, skip it", rule.NodeSelector.String(), rule.Number) + continue + } + + if selector.Matches(labels.Set(curNode.GetLabels())) { + if concurrentNum > rule.Number { + concurrentNum = rule.Number + } + } + } + + if concurrentNum == math.MaxInt32 { + s.logger.Infof("Per node number for node %s is not found, use the global number %v", s.nodeName, globalNum) + concurrentNum = globalNum + } else { + s.logger.Infof("Use the per node number %v over global number %v for node %s", concurrentNum, globalNum, s.nodeName) + } + + return concurrentNum +} diff --git a/pkg/cmd/cli/nodeagent/server_test.go b/pkg/cmd/cli/nodeagent/server_test.go index d66fc08eb..4472dfce1 100644 --- a/pkg/cmd/cli/nodeagent/server_test.go +++ b/pkg/cmd/cli/nodeagent/server_test.go @@ -17,16 +17,22 @@ package nodeagent import ( "context" + "fmt" "os" "path/filepath" + "strings" "testing" + "github.com/pkg/errors" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" "github.com/vmware-tanzu/velero/pkg/builder" + "github.com/vmware-tanzu/velero/pkg/nodeagent" testutil "github.com/vmware-tanzu/velero/pkg/test" ) @@ -107,3 +113,259 @@ func Test_validatePodVolumesHostPath(t *testing.T) { }) } } + +func Test_getDataPathConcurrentNum(t *testing.T) { + defaultNum := 100001 + globalNum := 6 + nodeName := "node-agent-node" + node1 := builder.ForNode("node-agent-node").Result() + node2 := builder.ForNode("node-agent-node").Labels(map[string]string{ + "host-name": "node-1", + "xxxx": "yyyyy", + }).Result() + + invalidLabelSelector := metav1.LabelSelector{ + MatchLabels: map[string]string{ + "inva/lid": "inva/lid", + }, + } + validLabelSelector1 := metav1.LabelSelector{ + MatchLabels: map[string]string{ + "host-name": "node-1", + }, + } + validLabelSelector2 := metav1.LabelSelector{ + MatchLabels: map[string]string{ + "xxxx": "yyyyy", + }, + } + + tests := []struct { + name string + getFunc func(context.Context, string, kubernetes.Interface) (*nodeagent.Configs, error) + setKubeClient bool + kubeClientObj []runtime.Object + expectNum int + expectLog string + }{ + { + name: "failed to get configs", + getFunc: func(context.Context, string, kubernetes.Interface) (*nodeagent.Configs, error) { + return nil, errors.New("fake-get-error") + }, + expectLog: "Failed to get node agent configs", + expectNum: defaultNum, + }, + { + name: "configs cm not found", + getFunc: func(context.Context, string, kubernetes.Interface) (*nodeagent.Configs, error) { + return nil, nil + }, + expectLog: fmt.Sprintf("Concurrency configs are not found, use the default number %v", defaultNum), + expectNum: defaultNum, + }, + { + name: "configs cm's data path concurrency is nil", + getFunc: func(context.Context, string, kubernetes.Interface) (*nodeagent.Configs, error) { + return &nodeagent.Configs{}, nil + }, + expectLog: fmt.Sprintf("Concurrency configs are not found, use the default number %v", defaultNum), + expectNum: defaultNum, + }, + { + name: "global number is invalid", + getFunc: func(context.Context, string, kubernetes.Interface) (*nodeagent.Configs, error) { + return &nodeagent.Configs{ + DataPathConcurrency: &nodeagent.DataPathConcurrency{ + GlobalConfig: -1, + }, + }, nil + }, 
+ expectLog: fmt.Sprintf("Global number %v is invalid, use the default value %v", -1, defaultNum), + expectNum: defaultNum, + }, + { + name: "global number is valid", + getFunc: func(context.Context, string, kubernetes.Interface) (*nodeagent.Configs, error) { + return &nodeagent.Configs{ + DataPathConcurrency: &nodeagent.DataPathConcurrency{ + GlobalConfig: globalNum, + }, + }, nil + }, + expectNum: globalNum, + }, + { + name: "node is not found", + getFunc: func(context.Context, string, kubernetes.Interface) (*nodeagent.Configs, error) { + return &nodeagent.Configs{ + DataPathConcurrency: &nodeagent.DataPathConcurrency{ + GlobalConfig: globalNum, + PerNodeConfig: []nodeagent.RuledConfigs{ + { + Number: 100, + }, + }, + }, + }, nil + }, + setKubeClient: true, + expectLog: fmt.Sprintf("Failed to get node info for %s, use the global number %v", nodeName, globalNum), + expectNum: globalNum, + }, + { + name: "failed to get selector", + getFunc: func(context.Context, string, kubernetes.Interface) (*nodeagent.Configs, error) { + return &nodeagent.Configs{ + DataPathConcurrency: &nodeagent.DataPathConcurrency{ + GlobalConfig: globalNum, + PerNodeConfig: []nodeagent.RuledConfigs{ + { + NodeSelector: invalidLabelSelector, + Number: 100, + }, + }, + }, + }, nil + }, + setKubeClient: true, + kubeClientObj: []runtime.Object{node1}, + expectLog: fmt.Sprintf("Failed to parse rule with label selector %s, skip it", invalidLabelSelector.String()), + expectNum: globalNum, + }, + { + name: "rule number is invalid", + getFunc: func(context.Context, string, kubernetes.Interface) (*nodeagent.Configs, error) { + return &nodeagent.Configs{ + DataPathConcurrency: &nodeagent.DataPathConcurrency{ + GlobalConfig: globalNum, + PerNodeConfig: []nodeagent.RuledConfigs{ + { + NodeSelector: validLabelSelector1, + Number: -1, + }, + }, + }, + }, nil + }, + setKubeClient: true, + kubeClientObj: []runtime.Object{node1}, + expectLog: fmt.Sprintf("Rule with label selector %s is with an invalid number %v, skip it", validLabelSelector1.String(), -1), + expectNum: globalNum, + }, + { + name: "label doesn't match", + getFunc: func(context.Context, string, kubernetes.Interface) (*nodeagent.Configs, error) { + return &nodeagent.Configs{ + DataPathConcurrency: &nodeagent.DataPathConcurrency{ + GlobalConfig: globalNum, + PerNodeConfig: []nodeagent.RuledConfigs{ + { + NodeSelector: validLabelSelector1, + Number: -1, + }, + }, + }, + }, nil + }, + setKubeClient: true, + kubeClientObj: []runtime.Object{node1}, + expectLog: fmt.Sprintf("Per node number for node %s is not found, use the global number %v", nodeName, globalNum), + expectNum: globalNum, + }, + { + name: "match one rule", + getFunc: func(context.Context, string, kubernetes.Interface) (*nodeagent.Configs, error) { + return &nodeagent.Configs{ + DataPathConcurrency: &nodeagent.DataPathConcurrency{ + GlobalConfig: globalNum, + PerNodeConfig: []nodeagent.RuledConfigs{ + { + NodeSelector: validLabelSelector1, + Number: 66, + }, + }, + }, + }, nil + }, + setKubeClient: true, + kubeClientObj: []runtime.Object{node2}, + expectLog: fmt.Sprintf("Use the per node number %v over global number %v for node %s", 66, globalNum, nodeName), + expectNum: 66, + }, + { + name: "match multiple rules", + getFunc: func(context.Context, string, kubernetes.Interface) (*nodeagent.Configs, error) { + return &nodeagent.Configs{ + DataPathConcurrency: &nodeagent.DataPathConcurrency{ + GlobalConfig: globalNum, + PerNodeConfig: []nodeagent.RuledConfigs{ + { + NodeSelector: validLabelSelector1, + Number: 66, 
+ }, + { + NodeSelector: validLabelSelector2, + Number: 36, + }, + }, + }, + }, nil + }, + setKubeClient: true, + kubeClientObj: []runtime.Object{node2}, + expectLog: fmt.Sprintf("Use the per node number %v over global number %v for node %s", 36, globalNum, nodeName), + expectNum: 36, + }, + { + name: "match multiple rules 2", + getFunc: func(context.Context, string, kubernetes.Interface) (*nodeagent.Configs, error) { + return &nodeagent.Configs{ + DataPathConcurrency: &nodeagent.DataPathConcurrency{ + GlobalConfig: globalNum, + PerNodeConfig: []nodeagent.RuledConfigs{ + { + NodeSelector: validLabelSelector1, + Number: 36, + }, + { + NodeSelector: validLabelSelector2, + Number: 66, + }, + }, + }, + }, nil + }, + setKubeClient: true, + kubeClientObj: []runtime.Object{node2}, + expectLog: fmt.Sprintf("Use the per node number %v over global number %v for node %s", 36, globalNum, nodeName), + expectNum: 36, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...) + + logBuffer := "" + + s := &nodeAgentServer{ + nodeName: nodeName, + logger: testutil.NewSingleLogger(&logBuffer), + } + + if test.setKubeClient { + s.kubeClient = fakeKubeClient + } + + getConfigsFunc = test.getFunc + + num := s.getDataPathConcurrentNum(defaultNum) + assert.Equal(t, test.expectNum, num) + if test.expectLog == "" { + assert.Equal(t, "", logBuffer) + } else { + assert.True(t, strings.Contains(logBuffer, test.expectLog)) + } + }) + } +} diff --git a/pkg/cmd/cli/repo/get.go b/pkg/cmd/cli/repo/get.go index 3a730c103..290e2e2ac 100644 --- a/pkg/cmd/cli/repo/get.go +++ b/pkg/cmd/cli/repo/get.go @@ -21,6 +21,8 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/client" @@ -38,19 +40,25 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command { err := output.ValidateFlags(c) cmd.CheckError(err) - veleroClient, err := f.Client() + crClient, err := f.KubebuilderClient() cmd.CheckError(err) var repos *api.BackupRepositoryList if len(args) > 0 { repos = new(api.BackupRepositoryList) for _, name := range args { - repo, err := veleroClient.VeleroV1().BackupRepositories(f.Namespace()).Get(context.TODO(), name, metav1.GetOptions{}) + repo := new(api.BackupRepository) + err := crClient.Get(context.TODO(), ctrlclient.ObjectKey{Namespace: f.Namespace(), Name: name}, repo) cmd.CheckError(err) repos.Items = append(repos.Items, *repo) } } else { - repos, err = veleroClient.VeleroV1().BackupRepositories(f.Namespace()).List(context.TODO(), listOptions) + selector := labels.NewSelector() + if listOptions.LabelSelector != "" { + selector, err = labels.Parse(listOptions.LabelSelector) + cmd.CheckError(err) + } + err = crClient.List(context.TODO(), repos, &ctrlclient.ListOptions{LabelSelector: selector}) cmd.CheckError(err) } diff --git a/pkg/cmd/cli/restore/logs.go b/pkg/cmd/cli/restore/logs.go index d721f324d..e88945868 100644 --- a/pkg/cmd/cli/restore/logs.go +++ b/pkg/cmd/cli/restore/logs.go @@ -24,7 +24,7 @@ import ( "github.com/spf13/cobra" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/client" @@ -49,13 +49,11 @@ func 
NewLogsCommand(f client.Factory) *cobra.Command { Run: func(c *cobra.Command, args []string) { restoreName := args[0] - veleroClient, err := f.Client() - cmd.CheckError(err) - kbClient, err := f.KubebuilderClient() cmd.CheckError(err) - restore, err := veleroClient.VeleroV1().Restores(f.Namespace()).Get(context.TODO(), restoreName, metav1.GetOptions{}) + restore := new(velerov1api.Restore) + err = kbClient.Get(context.TODO(), ctrlclient.ObjectKey{Namespace: f.Namespace(), Name: restoreName}, restore) if apierrors.IsNotFound(err) { cmd.Exit("Restore %q does not exist.", restoreName) } else if err != nil { diff --git a/pkg/cmd/cli/schedule/create.go b/pkg/cmd/cli/schedule/create.go index 4fc40e814..85f53acc3 100644 --- a/pkg/cmd/cli/schedule/create.go +++ b/pkg/cmd/cli/schedule/create.go @@ -115,7 +115,7 @@ func (o *CreateOptions) Complete(args []string, f client.Factory) error { func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { var orders map[string]string - veleroClient, err := f.Client() + crClient, err := f.KubebuilderClient() if err != nil { return err } @@ -171,7 +171,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { return err } - _, err = veleroClient.VeleroV1().Schedules(schedule.Namespace).Create(context.TODO(), schedule, metav1.CreateOptions{}) + err = crClient.Create(context.TODO(), schedule) if err != nil { return err } diff --git a/pkg/cmd/cli/schedule/describe.go b/pkg/cmd/cli/schedule/describe.go index b43cad45f..823f2edd1 100644 --- a/pkg/cmd/cli/schedule/describe.go +++ b/pkg/cmd/cli/schedule/describe.go @@ -22,6 +22,8 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/client" @@ -36,19 +38,25 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command { Use: use + " [NAME1] [NAME2] [NAME...]", Short: "Describe schedules", Run: func(c *cobra.Command, args []string) { - veleroClient, err := f.Client() + crClient, err := f.KubebuilderClient() cmd.CheckError(err) var schedules *v1.ScheduleList if len(args) > 0 { schedules = new(v1.ScheduleList) for _, name := range args { - schedule, err := veleroClient.VeleroV1().Schedules(f.Namespace()).Get(context.TODO(), name, metav1.GetOptions{}) + schedule := new(v1.Schedule) + err := crClient.Get(context.TODO(), ctrlclient.ObjectKey{Namespace: f.Namespace(), Name: name}, schedule) cmd.CheckError(err) schedules.Items = append(schedules.Items, *schedule) } } else { - schedules, err = veleroClient.VeleroV1().Schedules(f.Namespace()).List(context.TODO(), listOptions) + selector := labels.NewSelector() + if listOptions.LabelSelector != "" { + selector, err = labels.Parse(listOptions.LabelSelector) + cmd.CheckError(err) + } + err = crClient.List(context.TODO(), schedules, &ctrlclient.ListOptions{LabelSelector: selector}) cmd.CheckError(err) } diff --git a/pkg/cmd/cli/schedule/get.go b/pkg/cmd/cli/schedule/get.go index 4a20da630..a16ae8fe6 100644 --- a/pkg/cmd/cli/schedule/get.go +++ b/pkg/cmd/cli/schedule/get.go @@ -21,6 +21,8 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/client" @@ -38,19 +40,25 @@ func NewGetCommand(f client.Factory, use string) 
*cobra.Command { err := output.ValidateFlags(c) cmd.CheckError(err) - veleroClient, err := f.Client() + crClient, err := f.KubebuilderClient() cmd.CheckError(err) var schedules *api.ScheduleList if len(args) > 0 { schedules = new(api.ScheduleList) for _, name := range args { - schedule, err := veleroClient.VeleroV1().Schedules(f.Namespace()).Get(context.TODO(), name, metav1.GetOptions{}) + schedule := new(api.Schedule) + err := crClient.Get(context.TODO(), ctrlclient.ObjectKey{Name: name, Namespace: f.Namespace()}, schedule) cmd.CheckError(err) schedules.Items = append(schedules.Items, *schedule) } } else { - schedules, err = veleroClient.VeleroV1().Schedules(f.Namespace()).List(context.TODO(), listOptions) + selector := labels.NewSelector() + if listOptions.LabelSelector != "" { + selector, err = labels.Parse(listOptions.LabelSelector) + cmd.CheckError(err) + } + err := crClient.List(context.TODO(), schedules, &ctrlclient.ListOptions{LabelSelector: selector}) cmd.CheckError(err) } diff --git a/pkg/cmd/cli/schedule/pause.go b/pkg/cmd/cli/schedule/pause.go index 541fe6865..decda8260 100644 --- a/pkg/cmd/cli/schedule/pause.go +++ b/pkg/cmd/cli/schedule/pause.go @@ -25,6 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" kubeerrs "k8s.io/apimachinery/pkg/util/errors" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/client" @@ -63,7 +64,7 @@ func NewPauseCommand(f client.Factory, use string) *cobra.Command { } func runPause(f client.Factory, o *cli.SelectOptions, paused bool) error { - client, err := f.Client() + crClient, err := f.KubebuilderClient() if err != nil { return err } @@ -75,7 +76,8 @@ func runPause(f client.Factory, o *cli.SelectOptions, paused bool) error { switch { case len(o.Names) > 0: for _, name := range o.Names { - schedule, err := client.VeleroV1().Schedules(f.Namespace()).Get(context.TODO(), name, metav1.GetOptions{}) + schedule := new(velerov1api.Schedule) + err := crClient.Get(context.TODO(), ctrlclient.ObjectKey{Name: name, Namespace: f.Namespace()}, schedule) if err != nil { errs = append(errs, errors.WithStack(err)) continue @@ -83,11 +85,16 @@ func runPause(f client.Factory, o *cli.SelectOptions, paused bool) error { schedules = append(schedules, schedule) } default: - selector := labels.Everything().String() + selector := labels.Everything() if o.Selector.LabelSelector != nil { - selector = o.Selector.String() + convertedSelector, err := metav1.LabelSelectorAsSelector(o.Selector.LabelSelector) + if err != nil { + return errors.WithStack(err) + } + selector = convertedSelector } - res, err := client.VeleroV1().Schedules(f.Namespace()).List(context.TODO(), metav1.ListOptions{ + res := new(velerov1api.ScheduleList) + err := crClient.List(context.TODO(), res, &ctrlclient.ListOptions{ LabelSelector: selector, }) if err != nil { @@ -113,7 +120,7 @@ func runPause(f client.Factory, o *cli.SelectOptions, paused bool) error { continue } schedule.Spec.Paused = paused - if _, err := client.VeleroV1().Schedules(schedule.Namespace).Update(context.TODO(), schedule, metav1.UpdateOptions{}); err != nil { + if err := crClient.Update(context.TODO(), schedule); err != nil { return errors.Wrapf(err, "failed to update schedule %s", schedule.Name) } fmt.Printf("Schedule %s %s successfully\n", schedule.Name, msg) diff --git a/pkg/cmd/cli/snapshotlocation/create.go b/pkg/cmd/cli/snapshotlocation/create.go index b0e5e2f09..db55ad834 100644 
--- a/pkg/cmd/cli/snapshotlocation/create.go +++ b/pkg/cmd/cli/snapshotlocation/create.go @@ -124,12 +124,12 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { return err } - client, err := f.Client() + client, err := f.KubebuilderClient() if err != nil { return err } - if _, err := client.VeleroV1().VolumeSnapshotLocations(volumeSnapshotLocation.Namespace).Create(context.TODO(), volumeSnapshotLocation, metav1.CreateOptions{}); err != nil { + if err := client.Create(context.TODO(), volumeSnapshotLocation); err != nil { return errors.WithStack(err) } diff --git a/pkg/cmd/cli/snapshotlocation/get.go b/pkg/cmd/cli/snapshotlocation/get.go index c25f6c9f9..2acddbf7f 100644 --- a/pkg/cmd/cli/snapshotlocation/get.go +++ b/pkg/cmd/cli/snapshotlocation/get.go @@ -21,6 +21,7 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kbclient "sigs.k8s.io/controller-runtime/pkg/client" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/client" @@ -36,18 +37,19 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command { Run: func(c *cobra.Command, args []string) { err := output.ValidateFlags(c) cmd.CheckError(err) - veleroClient, err := f.Client() + client, err := f.KubebuilderClient() cmd.CheckError(err) - var locations *api.VolumeSnapshotLocationList + locations := new(api.VolumeSnapshotLocationList) + if len(args) > 0 { - locations = new(api.VolumeSnapshotLocationList) for _, name := range args { - location, err := veleroClient.VeleroV1().VolumeSnapshotLocations(f.Namespace()).Get(context.TODO(), name, metav1.GetOptions{}) + location := new(api.VolumeSnapshotLocation) + err := client.Get(context.TODO(), kbclient.ObjectKey{Namespace: f.Namespace(), Name: name}, location) cmd.CheckError(err) locations.Items = append(locations.Items, *location) } } else { - locations, err = veleroClient.VeleroV1().VolumeSnapshotLocations(f.Namespace()).List(context.TODO(), listOptions) + err = client.List(context.TODO(), locations, &kbclient.ListOptions{Namespace: f.Namespace()}) cmd.CheckError(err) } _, err = output.PrintWithFormat(c, locations) diff --git a/pkg/cmd/server/plugin/plugin.go b/pkg/cmd/server/plugin/plugin.go index 3a4f97107..df1cf9f2d 100644 --- a/pkg/cmd/server/plugin/plugin.go +++ b/pkg/cmd/server/plugin/plugin.go @@ -152,12 +152,12 @@ func newPodVolumeRestoreItemAction(f client.Factory) plugincommon.HandlerInitial return nil, err } - veleroClient, err := f.Client() + crClient, err := f.KubebuilderClient() if err != nil { return nil, err } - return restore.NewPodVolumeRestoreAction(logger, client.CoreV1().ConfigMaps(f.Namespace()), veleroClient.VeleroV1().PodVolumeBackups(f.Namespace())), nil + return restore.NewPodVolumeRestoreAction(logger, client.CoreV1().ConfigMaps(f.Namespace()), crClient), nil } } diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index bde964aad..6333dd401 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -66,7 +66,6 @@ import ( "github.com/vmware-tanzu/velero/pkg/controller" velerodiscovery "github.com/vmware-tanzu/velero/pkg/discovery" "github.com/vmware-tanzu/velero/pkg/features" - clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" "github.com/vmware-tanzu/velero/pkg/itemoperationmap" "github.com/vmware-tanzu/velero/pkg/metrics" "github.com/vmware-tanzu/velero/pkg/nodeagent" @@ -239,7 +238,7 @@ func NewCommand(f client.Factory) *cobra.Command { command.Flags().DurationVar(&config.resourceTimeout, 
"resource-timeout", config.resourceTimeout, "How long to wait for resource processes which are not covered by other specific timeout parameters. Default is 10 minutes.") command.Flags().IntVar(&config.maxConcurrentK8SConnections, "max-concurrent-k8s-connections", config.maxConcurrentK8SConnections, "Max concurrent connections number that Velero can create with kube-apiserver. Default is 30.") command.Flags().BoolVar(&config.defaultSnapshotMoveData, "default-snapshot-move-data", config.defaultSnapshotMoveData, "Move data by default for all snapshots supporting data movement.") - command.Flags().BoolVar(&config.disableInformerCache, "disable-informer-cache", config.disableInformerCache, "Disable informer cache for Get calls on restore. WIth this enabled, it will speed up restore in cases where there are backup resources which already exist in the cluster, but for very large clusters this will increase velero memory usage. Default is false (don't disable).") + command.Flags().BoolVar(&config.disableInformerCache, "disable-informer-cache", config.disableInformerCache, "Disable informer cache for Get calls on restore. With this enabled, it will speed up restore in cases where there are backup resources which already exist in the cluster, but for very large clusters this will increase velero memory usage. Default is false (don't disable).") return command } @@ -249,12 +248,12 @@ type server struct { metricsAddress string kubeClientConfig *rest.Config kubeClient kubernetes.Interface - veleroClient clientset.Interface discoveryClient discovery.DiscoveryInterface discoveryHelper velerodiscovery.Helper dynamicClient dynamic.Interface csiSnapshotClient *snapshotv1client.Clientset csiSnapshotLister snapshotv1listers.VolumeSnapshotLister + crClient ctrlclient.Client ctx context.Context cancelFunc context.CancelFunc logger logrus.FieldLogger @@ -268,6 +267,7 @@ type server struct { mgr manager.Manager credentialFileStore credentials.FileStore credentialSecretStore credentials.SecretStore + featureVerifier features.Verifier } func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*server, error) { @@ -294,12 +294,12 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s return nil, err } - veleroClient, err := f.Client() + dynamicClient, err := f.DynamicClient() if err != nil { return nil, err } - dynamicClient, err := f.DynamicClient() + crClient, err := f.KubebuilderClient() if err != nil { return nil, err } @@ -309,6 +309,12 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s return nil, err } + featureVerifier := features.NewVerifier(pluginRegistry) + + if _, err := featureVerifier.Verify(velerov1api.CSIFeatureFlag); err != nil { + logger.WithError(err).Warn("CSI feature verification failed, the feature may not be ready.") + } + // cancelFunc is not deferred here because if it was, then ctx would immediately // be canceled once this function exited, making it useless to any informers using later. // That, in turn, causes the velero server to halt when the first informer tries to use it. 
@@ -367,14 +373,20 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s return nil, err } + var discoveryClient *discovery.DiscoveryClient + if discoveryClient, err = discovery.NewDiscoveryClientForConfig(clientConfig); err != nil { + cancelFunc() + return nil, err + } + s := &server{ namespace: f.Namespace(), metricsAddress: config.metricsAddress, kubeClientConfig: clientConfig, kubeClient: kubeClient, - veleroClient: veleroClient, - discoveryClient: veleroClient.Discovery(), + discoveryClient: discoveryClient, dynamicClient: dynamicClient, + crClient: crClient, ctx: ctx, cancelFunc: cancelFunc, logger: logger, @@ -384,6 +396,7 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s mgr: mgr, credentialFileStore: credentialFileStore, credentialSecretStore: credentialSecretStore, + featureVerifier: featureVerifier, } // Setup CSI snapshot client and lister @@ -528,7 +541,7 @@ High priorities: - PVs go before PVCs because PVCs depend on them. - PVCs go before pods or controllers so they can be mounted as volumes. - Service accounts go before secrets so service account token secrets can be filled automatically. - - Secrets and config maps go before pods or controllers so they can be mounted + - Secrets and ConfigMaps go before pods or controllers so they can be mounted as volumes. - Limit ranges go before pods or controllers so pods can use them. - Pods go before controllers so they can be explicitly restored and potentially @@ -719,6 +732,11 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupStorageLocation) } + pvbInformer, err := s.mgr.GetCache().GetInformer(s.ctx, &velerov1api.PodVolumeBackup{}) + if err != nil { + s.logger.Fatal(err, "fail to get controller-runtime informer from manager for PVB") + } + if _, ok := enabledRuntimeControllers[controller.Backup]; ok { backupper, err := backup.NewKubernetesBackupper( s.mgr.GetClient(), @@ -728,10 +746,8 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string podvolume.NewBackupperFactory( s.repoLocker, s.repoEnsurer, - s.veleroClient, - s.kubeClient.CoreV1(), - s.kubeClient.CoreV1(), - s.kubeClient.CoreV1(), + s.crClient, + pvbInformer, s.logger, ), s.config.podVolumeOperationTimeout, @@ -760,7 +776,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string backupStoreGetter, s.config.formatFlag.Parse(), s.csiSnapshotLister, - s.csiSnapshotClient, s.credentialFileStore, s.config.maxConcurrentK8SConnections, s.config.defaultSnapshotMoveData, @@ -810,10 +825,8 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string podvolume.NewBackupperFactory( s.repoLocker, s.repoEnsurer, - s.veleroClient, - s.kubeClient.CoreV1(), - s.kubeClient.CoreV1(), - s.kubeClient.CoreV1(), + s.crClient, + pvbInformer, s.logger, ), s.config.podVolumeOperationTimeout, @@ -824,6 +837,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string cmd.CheckError(err) r := controller.NewBackupFinalizerReconciler( s.mgr.GetClient(), + s.csiSnapshotLister, clock.RealClock{}, backupper, newPluginManager, @@ -901,6 +915,11 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string } } + pvrInformer, err := s.mgr.GetCache().GetInformer(s.ctx, &velerov1api.PodVolumeRestore{}) + if err != nil { + s.logger.Fatal(err, "fail to get controller-runtime informer from manager for PVR") + 
} + if _, ok := enabledRuntimeControllers[controller.Restore]; ok { restorer, err := restore.NewKubernetesRestorer( s.discoveryHelper, @@ -910,10 +929,9 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string podvolume.NewRestorerFactory( s.repoLocker, s.repoEnsurer, - s.veleroClient, - s.kubeClient.CoreV1(), - s.kubeClient.CoreV1(), s.kubeClient, + s.crClient, + pvrInformer, s.logger, ), s.config.podVolumeOperationTimeout, @@ -924,6 +942,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string s.kubeClient.CoreV1().RESTClient(), s.credentialFileStore, s.mgr.GetClient(), + s.featureVerifier, ) cmd.CheckError(err) @@ -1062,13 +1081,13 @@ func markInProgressBackupsFailed(ctx context.Context, client ctrlclient.Client, } for i, backup := range backups.Items { - if backup.Status.Phase != velerov1api.BackupPhaseInProgress && backup.Status.Phase != velerov1api.BackupPhaseWaitingForPluginOperations { + if backup.Status.Phase != velerov1api.BackupPhaseInProgress { log.Debugf("the status of backup %q is %q, skip", backup.GetName(), backup.Status.Phase) continue } updated := backup.DeepCopy() updated.Status.Phase = velerov1api.BackupPhaseFailed - updated.Status.FailureReason = fmt.Sprintf("found a backup with status %q during the server starting, mark it as %q", velerov1api.BackupPhaseInProgress, updated.Status.Phase) + updated.Status.FailureReason = fmt.Sprintf("found a backup with status %q during the server starting, mark it as %q", backup.Status.Phase, updated.Status.Phase) updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()} if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&backups.Items[i])); err != nil { log.WithError(errors.WithStack(err)).Errorf("failed to patch backup %q", backup.GetName()) @@ -1086,13 +1105,13 @@ func markInProgressRestoresFailed(ctx context.Context, client ctrlclient.Client, return } for i, restore := range restores.Items { - if restore.Status.Phase != velerov1api.RestorePhaseInProgress && restore.Status.Phase != velerov1api.RestorePhaseWaitingForPluginOperations { + if restore.Status.Phase != velerov1api.RestorePhaseInProgress { log.Debugf("the status of restore %q is %q, skip", restore.GetName(), restore.Status.Phase) continue } updated := restore.DeepCopy() updated.Status.Phase = velerov1api.RestorePhaseFailed - updated.Status.FailureReason = fmt.Sprintf("found a restore with status %q during the server starting, mark it as %q", velerov1api.RestorePhaseInProgress, updated.Status.Phase) + updated.Status.FailureReason = fmt.Sprintf("found a restore with status %q during the server starting, mark it as %q", restore.Status.Phase, updated.Status.Phase) updated.Status.CompletionTimestamp = &metav1.Time{Time: time.Now()} if err := client.Patch(ctx, updated, ctrlclient.MergeFrom(&restores.Items[i])); err != nil { log.WithError(errors.WithStack(err)).Errorf("failed to patch restore %q", restore.GetName()) @@ -1115,7 +1134,9 @@ func markDataUploadsCancel(ctx context.Context, client ctrlclient.Client, backup du := dataUploads.Items[i] if du.Status.Phase == velerov2alpha1api.DataUploadPhaseAccepted || du.Status.Phase == velerov2alpha1api.DataUploadPhasePrepared || - du.Status.Phase == velerov2alpha1api.DataUploadPhaseInProgress { + du.Status.Phase == velerov2alpha1api.DataUploadPhaseInProgress || + du.Status.Phase == velerov2alpha1api.DataUploadPhaseNew || + du.Status.Phase == "" { err := controller.UpdateDataUploadWithRetry(ctx, client, types.NamespacedName{Namespace: du.Namespace, Name: du.Name}, 
log.WithField("dataupload", du.Name), func(dataUpload *velerov2alpha1api.DataUpload) { dataUpload.Spec.Cancel = true @@ -1143,7 +1164,9 @@ func markDataDownloadsCancel(ctx context.Context, client ctrlclient.Client, rest dd := dataDownloads.Items[i] if dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseAccepted || dd.Status.Phase == velerov2alpha1api.DataDownloadPhasePrepared || - dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseInProgress { + dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseInProgress || + dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseNew || + dd.Status.Phase == "" { err := controller.UpdateDataDownloadWithRetry(ctx, client, types.NamespacedName{Namespace: dd.Namespace, Name: dd.Name}, log.WithField("datadownload", dd.Name), func(dataDownload *velerov2alpha1api.DataDownload) { dataDownload.Spec.Cancel = true @@ -1151,7 +1174,7 @@ func markDataDownloadsCancel(ctx context.Context, client ctrlclient.Client, rest }) if err != nil { - log.WithError(errors.WithStack(err)).Errorf("failed to mark dataupload %q cancel", dd.GetName()) + log.WithError(errors.WithStack(err)).Errorf("failed to mark datadownload %q cancel", dd.GetName()) continue } log.WithField("datadownload", dd.GetName()).Warn(dd.Status.Message) diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go index 0e53121f0..746c9d789 100644 --- a/pkg/controller/backup_controller.go +++ b/pkg/controller/backup_controller.go @@ -21,6 +21,7 @@ import ( "context" "fmt" "os" + "strings" "time" snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" @@ -33,7 +34,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/utils/clock" ctrl "sigs.k8s.io/controller-runtime" @@ -111,7 +111,6 @@ func NewBackupReconciler( backupStoreGetter persistence.ObjectBackupStoreGetter, formatFlag logging.Format, volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister, - volumeSnapshotClient snapshotterClientSet.Interface, credentialStore credentials.FileStore, maxConcurrentK8SConnections int, defaultSnapshotMoveData bool, @@ -137,7 +136,6 @@ func NewBackupReconciler( backupStoreGetter: backupStoreGetter, formatFlag: formatFlag, volumeSnapshotLister: volumeSnapshotLister, - volumeSnapshotClient: volumeSnapshotClient, credentialFileStore: credentialStore, maxConcurrentK8SConnections: maxConcurrentK8SConnections, defaultSnapshotMoveData: defaultSnapshotMoveData, @@ -476,7 +474,7 @@ func (b *backupReconciler) prepareBackupRequest(backup *velerov1api.Backup, logg request.Status.ValidationErrors = append(request.Status.ValidationErrors, "encountered labelSelector as well as orLabelSelectors in backup spec, only one can be specified") } - if request.Spec.ResourcePolicy != nil && request.Spec.ResourcePolicy.Kind == resourcepolicies.ConfigmapRefType { + if request.Spec.ResourcePolicy != nil && strings.EqualFold(request.Spec.ResourcePolicy.Kind, resourcepolicies.ConfigmapRefType) { policiesConfigmap := &corev1api.ConfigMap{} err := b.kbClient.Get(context.Background(), kbclient.ObjectKey{Namespace: request.Namespace, Name: request.Spec.ResourcePolicy.Name}, policiesConfigmap) if err != nil { @@ -659,65 +657,15 @@ func (b *backupReconciler) runBackup(backup *pkgbackup.Request) error { fatalErrs = append(fatalErrs, err) } - // Empty slices here so that they can be passed in to the persistBackup call later, 
regardless of whether or not CSI's enabled. - // This way, we only make the Lister call if the feature flag's on. - var volumeSnapshots []snapshotv1api.VolumeSnapshot - var volumeSnapshotContents []snapshotv1api.VolumeSnapshotContent - var volumeSnapshotClasses []snapshotv1api.VolumeSnapshotClass - if boolptr.IsSetToTrue(backup.Spec.SnapshotMoveData) { - backupLog.Info("backup SnapshotMoveData is set to true, skip VolumeSnapshot resource persistence.") - } else if features.IsEnabled(velerov1api.CSIFeatureFlag) { - selector := label.NewSelectorForBackup(backup.Name) - vscList := &snapshotv1api.VolumeSnapshotContentList{} - - if b.volumeSnapshotLister != nil { - tmpVSs, err := b.volumeSnapshotLister.List(label.NewSelectorForBackup(backup.Name)) - if err != nil { - backupLog.Error(err) - } - for _, vs := range tmpVSs { - volumeSnapshots = append(volumeSnapshots, *vs) - } - } - - backup.CSISnapshots = volumeSnapshots - - err = b.kbClient.List(context.Background(), vscList, &kbclient.ListOptions{LabelSelector: selector}) - if err != nil { - backupLog.Error(err) - } - if len(vscList.Items) >= 0 { - volumeSnapshotContents = vscList.Items - } - - vsClassSet := sets.NewString() - for index := range volumeSnapshotContents { - // persist the volumesnapshotclasses referenced by vsc - if volumeSnapshotContents[index].Spec.VolumeSnapshotClassName != nil && !vsClassSet.Has(*volumeSnapshotContents[index].Spec.VolumeSnapshotClassName) { - vsClass := &snapshotv1api.VolumeSnapshotClass{} - if err := b.kbClient.Get(context.TODO(), kbclient.ObjectKey{Name: *volumeSnapshotContents[index].Spec.VolumeSnapshotClassName}, vsClass); err != nil { - backupLog.Error(err) - } else { - vsClassSet.Insert(*volumeSnapshotContents[index].Spec.VolumeSnapshotClassName) - volumeSnapshotClasses = append(volumeSnapshotClasses, *vsClass) - } - } - } - } - + // native snapshots phase will either be failed or completed right away + // https://github.com/vmware-tanzu/velero/blob/de3ea52f0cc478e99efa7b9524c7f353514261a4/pkg/backup/item_backupper.go#L632-L639 backup.Status.VolumeSnapshotsAttempted = len(backup.VolumeSnapshots) for _, snap := range backup.VolumeSnapshots { if snap.Status.Phase == volume.SnapshotPhaseCompleted { backup.Status.VolumeSnapshotsCompleted++ } } - - backup.Status.CSIVolumeSnapshotsAttempted = len(backup.CSISnapshots) - for _, vs := range backup.CSISnapshots { - if vs.Status != nil && boolptr.IsSetToTrue(vs.Status.ReadyToUse) { - backup.Status.CSIVolumeSnapshotsCompleted++ - } - } + volumeSnapshots, volumeSnapshotContents, volumeSnapshotClasses := pkgbackup.UpdateBackupCSISnapshotsStatus(b.kbClient, b.volumeSnapshotLister, backup.Backup, backupLog) // Iterate over backup item operations and update progress. // Any errors on operations at this point should be added to backup errors. @@ -848,6 +796,7 @@ func persistBackup(backup *pkgbackup.Request, ) []error { persistErrs := []error{} backupJSON := new(bytes.Buffer) + volumeInfos := make([]volume.VolumeInfo, 0) if err := encode.To(backup.Backup, "json", backupJSON); err != nil { persistErrs = append(persistErrs, errors.Wrap(err, "error encoding backup")) @@ -894,6 +843,11 @@ func persistBackup(backup *pkgbackup.Request, persistErrs = append(persistErrs, errs...) } + volumeInfoJSON, errs := encode.ToJSONGzip(volumeInfos, "backup volumes information") + if errs != nil { + persistErrs = append(persistErrs, errs...) + } + if len(persistErrs) > 0 { // Don't upload the JSON files or backup tarball if encoding to json fails. 
backupJSON = nil @@ -905,6 +859,7 @@ func persistBackup(backup *pkgbackup.Request, csiSnapshotContentsJSON = nil csiSnapshotClassesJSON = nil backupResult = nil + volumeInfoJSON = nil } backupInfo := persistence.BackupInfo{ @@ -920,6 +875,7 @@ func persistBackup(backup *pkgbackup.Request, CSIVolumeSnapshots: csiSnapshotJSON, CSIVolumeSnapshotContents: csiSnapshotContentsJSON, CSIVolumeSnapshotClasses: csiSnapshotClassesJSON, + BackupVolumeInfo: volumeInfoJSON, } if err := backupStore.PutBackup(backupInfo); err != nil { persistErrs = append(persistErrs, err) diff --git a/pkg/controller/backup_controller_test.go b/pkg/controller/backup_controller_test.go index f18787733..df2e22a22 100644 --- a/pkg/controller/backup_controller_test.go +++ b/pkg/controller/backup_controller_test.go @@ -21,11 +21,13 @@ import ( "context" "fmt" "io" + "reflect" "sort" "strings" "testing" "time" + "github.com/google/go-cmp/cmp" snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" snapshotfake "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/fake" snapshotinformers "github.com/kubernetes-csi/external-snapshotter/client/v4/informers/externalversions" @@ -43,6 +45,10 @@ import ( ctrl "sigs.k8s.io/controller-runtime" kbclient "sigs.k8s.io/controller-runtime/pkg/client" + kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube" + + fakeClient "sigs.k8s.io/controller-runtime/pkg/client/fake" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" pkgbackup "github.com/vmware-tanzu/velero/pkg/backup" "github.com/vmware-tanzu/velero/pkg/builder" @@ -1665,3 +1671,63 @@ func Test_getLastSuccessBySchedule(t *testing.T) { }) } } + +// Unit tests to make sure that the backup's status is updated correctly during reconcile. 
+// To clear up confusion whether status can be updated with Patch alone without status writer and not kbClient.Status().Patch() +func TestPatchResourceWorksWithStatus(t *testing.T) { + type args struct { + original *velerov1api.Backup + updated *velerov1api.Backup + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "patch backup status", + args: args{ + original: defaultBackup().SnapshotMoveData(false).Result(), + updated: defaultBackup().SnapshotMoveData(false).WithStatus(velerov1api.BackupStatus{ + CSIVolumeSnapshotsCompleted: 1, + }).Result(), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := runtime.NewScheme() + error := velerov1api.AddToScheme(scheme) + if error != nil { + t.Errorf("PatchResource() error = %v", error) + } + fakeClient := fakeClient.NewClientBuilder().WithScheme(scheme).WithObjects(tt.args.original).Build() + fromCluster := &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + Name: tt.args.original.Name, + Namespace: tt.args.original.Namespace, + }, + } + // check original exists + if err := fakeClient.Get(context.Background(), kbclient.ObjectKeyFromObject(tt.args.updated), fromCluster); err != nil { + t.Errorf("PatchResource() error = %v", err) + } + // ignore resourceVersion + tt.args.updated.ResourceVersion = fromCluster.ResourceVersion + tt.args.original.ResourceVersion = fromCluster.ResourceVersion + if err := kubeutil.PatchResource(tt.args.original, tt.args.updated, fakeClient); (err != nil) != tt.wantErr { + t.Errorf("PatchResource() error = %v, wantErr %v", err, tt.wantErr) + } + // check updated exists + if err := fakeClient.Get(context.Background(), kbclient.ObjectKeyFromObject(tt.args.updated), fromCluster); err != nil { + t.Errorf("PatchResource() error = %v", err) + } + + // check fromCluster is equal to updated + if !reflect.DeepEqual(fromCluster, tt.args.updated) { + t.Error(cmp.Diff(fromCluster, tt.args.updated)) + } + }) + + } +} diff --git a/pkg/controller/backup_finalizer_controller.go b/pkg/controller/backup_finalizer_controller.go index be88908de..eb99f6ee5 100644 --- a/pkg/controller/backup_finalizer_controller.go +++ b/pkg/controller/backup_finalizer_controller.go @@ -29,6 +29,8 @@ import ( ctrl "sigs.k8s.io/controller-runtime" kbclient "sigs.k8s.io/controller-runtime/pkg/client" + snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" pkgbackup "github.com/vmware-tanzu/velero/pkg/backup" "github.com/vmware-tanzu/velero/pkg/metrics" @@ -40,19 +42,21 @@ import ( // backupFinalizerReconciler reconciles a Backup object type backupFinalizerReconciler struct { - client kbclient.Client - clock clocks.WithTickerAndDelayedExecution - backupper pkgbackup.Backupper - newPluginManager func(logrus.FieldLogger) clientmgmt.Manager - backupTracker BackupTracker - metrics *metrics.ServerMetrics - backupStoreGetter persistence.ObjectBackupStoreGetter - log logrus.FieldLogger + client kbclient.Client + volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister + clock clocks.WithTickerAndDelayedExecution + backupper pkgbackup.Backupper + newPluginManager func(logrus.FieldLogger) clientmgmt.Manager + backupTracker BackupTracker + metrics *metrics.ServerMetrics + backupStoreGetter persistence.ObjectBackupStoreGetter + log logrus.FieldLogger } // NewBackupFinalizerReconciler initializes and returns backupFinalizerReconciler struct. 
func NewBackupFinalizerReconciler( client kbclient.Client, + volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister, clock clocks.WithTickerAndDelayedExecution, backupper pkgbackup.Backupper, newPluginManager func(logrus.FieldLogger) clientmgmt.Manager, @@ -187,6 +191,7 @@ func (r *backupFinalizerReconciler) Reconcile(ctx context.Context, req ctrl.Requ backup.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now()} recordBackupMetrics(log, backup, outBackupFile, r.metrics, true) + pkgbackup.UpdateBackupCSISnapshotsStatus(r.client, r.volumeSnapshotLister, backup, log) // update backup metadata in object store backupJSON := new(bytes.Buffer) if err := encode.To(backup, "json", backupJSON); err != nil { diff --git a/pkg/controller/backup_finalizer_controller_test.go b/pkg/controller/backup_finalizer_controller_test.go index 011a6561b..f759d0318 100644 --- a/pkg/controller/backup_finalizer_controller_test.go +++ b/pkg/controller/backup_finalizer_controller_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -43,12 +44,14 @@ import ( "github.com/vmware-tanzu/velero/pkg/plugin/framework" "github.com/vmware-tanzu/velero/pkg/plugin/velero" velerotest "github.com/vmware-tanzu/velero/pkg/test" + velerotestmocks "github.com/vmware-tanzu/velero/pkg/test/mocks" ) -func mockBackupFinalizerReconciler(fakeClient kbclient.Client, fakeClock *testclocks.FakeClock) (*backupFinalizerReconciler, *fakeBackupper) { +func mockBackupFinalizerReconciler(fakeClient kbclient.Client, fakeVolumeSnapshotLister snapshotv1listers.VolumeSnapshotLister, fakeClock *testclocks.FakeClock) (*backupFinalizerReconciler, *fakeBackupper) { backupper := new(fakeBackupper) return NewBackupFinalizerReconciler( fakeClient, + fakeVolumeSnapshotLister, fakeClock, backupper, func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, @@ -160,7 +163,10 @@ func TestBackupFinalizerReconcile(t *testing.T) { } fakeClient := velerotest.NewFakeControllerRuntimeClient(t, initObjs...) 
- reconciler, backupper := mockBackupFinalizerReconciler(fakeClient, fakeClock) + + fakeVolumeSnapshotLister := velerotestmocks.NewVolumeSnapshotLister(t) + + reconciler, backupper := mockBackupFinalizerReconciler(fakeClient, fakeVolumeSnapshotLister, fakeClock) pluginManager.On("CleanupClients").Return(nil) backupStore.On("GetBackupItemOperations", test.backup.Name).Return(test.backupOperations, nil) backupStore.On("GetBackupContents", mock.Anything).Return(io.NopCloser(bytes.NewReader([]byte("hello world"))), nil) diff --git a/pkg/controller/backup_operations_controller.go b/pkg/controller/backup_operations_controller.go index f00e9c205..e36691e9c 100644 --- a/pkg/controller/backup_operations_controller.go +++ b/pkg/controller/backup_operations_controller.go @@ -19,8 +19,11 @@ package controller import ( "bytes" "context" + "fmt" "time" + v2 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v2" + "github.com/pkg/errors" "github.com/sirupsen/logrus" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -275,6 +278,8 @@ func (c *backupOperationsReconciler) updateBackupAndOperationsJSON( return nil } +// check progress of backupItemOperations +// return: inProgressOperations, changes, completedCount, failedCount, errs func getBackupItemOperationProgress( backup *velerov1api.Backup, pluginManager clientmgmt.Manager, @@ -291,7 +296,7 @@ func getBackupItemOperationProgress( if err != nil { operation.Status.Phase = itemoperation.OperationPhaseFailed operation.Status.Error = err.Error() - errs = append(errs, err.Error()) + errs = append(errs, wrapErrMsg(err.Error(), bia)) changes = true failedCount++ continue @@ -300,7 +305,7 @@ func getBackupItemOperationProgress( if err != nil { operation.Status.Phase = itemoperation.OperationPhaseFailed operation.Status.Error = err.Error() - errs = append(errs, err.Error()) + errs = append(errs, wrapErrMsg(err.Error(), bia)) changes = true failedCount++ continue @@ -338,7 +343,7 @@ func getBackupItemOperationProgress( if operationProgress.Err != "" { operation.Status.Phase = itemoperation.OperationPhaseFailed operation.Status.Error = operationProgress.Err - errs = append(errs, operationProgress.Err) + errs = append(errs, wrapErrMsg(operationProgress.Err, bia)) changes = true failedCount++ continue @@ -353,7 +358,7 @@ func getBackupItemOperationProgress( _ = bia.Cancel(operation.Spec.OperationID, backup) operation.Status.Phase = itemoperation.OperationPhaseFailed operation.Status.Error = "Asynchronous action timed out" - errs = append(errs, operation.Status.Error) + errs = append(errs, wrapErrMsg(operation.Status.Error, bia)) changes = true failedCount++ continue @@ -373,3 +378,15 @@ func getBackupItemOperationProgress( } return inProgressOperations, changes, completedCount, failedCount, errs } + +// wrap the error message to include the BIA name +func wrapErrMsg(errMsg string, bia v2.BackupItemAction) string { + plugin := "unknown" + if bia != nil { + plugin = bia.Name() + } + if len(errMsg) > 0 { + errMsg += ", " + } + return fmt.Sprintf("%splugin: %s", errMsg, plugin) +} diff --git a/pkg/controller/backup_operations_controller_test.go b/pkg/controller/backup_operations_controller_test.go index 417294a7c..00ffd7f7a 100644 --- a/pkg/controller/backup_operations_controller_test.go +++ b/pkg/controller/backup_operations_controller_test.go @@ -21,6 +21,8 @@ import ( "testing" "time" + v2 "github.com/vmware-tanzu/velero/pkg/plugin/velero/backupitemaction/v2" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/mock" @@ -286,6 +288,7 @@ func TestBackupOperationsReconcile(t *testing.T) { backupStore.On("PutBackupItemOperations", mock.Anything, mock.Anything).Return(nil) backupStore.On("PutBackupMetadata", mock.Anything, mock.Anything).Return(nil) for _, operation := range test.backupOperations { + bia.On("Name").Return("test") bia.On("Progress", operation.Spec.OperationID, mock.Anything). Return(velero.OperationProgress{ Completed: test.operationComplete, @@ -308,3 +311,40 @@ func TestBackupOperationsReconcile(t *testing.T) { }) } } + +func TestWrapErrMsg(t *testing.T) { + bia2 := &biav2mocks.BackupItemAction{} + bia2.On("Name").Return("test-bia") + cases := []struct { + name string + inputErr string + plugin v2.BackupItemAction + expect string + }{ + { + name: "empty error message", + inputErr: "", + plugin: bia2, + expect: "plugin: test-bia", + }, + { + name: "nil bia", + inputErr: "some error happened", + plugin: nil, + expect: "some error happened, plugin: unknown", + }, + { + name: "regular error and bia", + inputErr: "some error happened", + plugin: bia2, + expect: "some error happened, plugin: test-bia", + }, + } + + for _, test := range cases { + t.Run(test.name, func(t *testing.T) { + got := wrapErrMsg(test.inputErr, test.plugin) + assert.Equal(t, test.expect, got) + }) + } +} diff --git a/pkg/controller/backup_sync_controller.go b/pkg/controller/backup_sync_controller.go index 2fb61d603..7a3b47a11 100644 --- a/pkg/controller/backup_sync_controller.go +++ b/pkg/controller/backup_sync_controller.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -142,6 +143,16 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request) log = log.WithField("backup", backupName) log.Info("Attempting to sync backup into cluster") + exist, err := backupStore.BackupExists(location.Spec.ObjectStorage.Bucket, backupName) + if err != nil { + log.WithError(errors.WithStack(err)).Error("Error checking backup exist from backup store") + continue + } + if !exist { + log.Debugf("backup %s doesn't exist in backup store, skip", backupName) + continue + } + backup, err := backupStore.GetBackupMetadata(backupName) if err != nil { log.WithError(errors.WithStack(err)).Error("Error getting backup metadata from backup store") @@ -171,6 +182,9 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request) } backup.Labels[velerov1api.StorageLocationLabel] = label.GetValidName(backup.Spec.StorageLocation) + //check for the ownership references. If they do not exist, remove them. 
+ backup.ObjectMeta.OwnerReferences = b.filterBackupOwnerReferences(ctx, backup, log) + // attempt to create backup custom resource via API err = b.client.Create(ctx, backup, &client.CreateOptions{}) switch { @@ -286,6 +300,35 @@ func (b *backupSyncReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, nil } +func (b *backupSyncReconciler) filterBackupOwnerReferences(ctx context.Context, backup *velerov1api.Backup, log logrus.FieldLogger) []metav1.OwnerReference { + listedReferences := backup.ObjectMeta.OwnerReferences + foundReferences := make([]metav1.OwnerReference, 0) + for _, v := range listedReferences { + switch v.Kind { + case "Schedule": + schedule := new(velerov1api.Schedule) + err := b.client.Get(ctx, types.NamespacedName{ + Name: v.Name, + Namespace: backup.Namespace, + }, schedule) + switch { + case err != nil && apierrors.IsNotFound(err): + log.Warnf("Removing missing schedule ownership reference %s/%s from backup", backup.Namespace, v.Name) + continue + case schedule.UID != v.UID: + log.Warnf("Removing schedule ownership reference with mismatched UIDs. Expected %s, got %s", v.UID, schedule.UID) + continue + case err != nil && !apierrors.IsNotFound(err): + log.WithError(errors.WithStack(err)).Error("Error finding schedule ownership reference, keeping schedule on backup") + } + default: + log.Warnf("Unable to check ownership reference for unknown kind, %s", v.Kind) + } + foundReferences = append(foundReferences, v) + } + return foundReferences +} + // SetupWithManager is used to setup controller and its watching sources. func (b *backupSyncReconciler) SetupWithManager(mgr ctrl.Manager) error { backupSyncSource := kube.NewPeriodicalEnqueueSource( diff --git a/pkg/controller/backup_sync_controller_test.go b/pkg/controller/backup_sync_controller_test.go index 86ca3227b..1a7857ec1 100644 --- a/pkg/controller/backup_sync_controller_test.go +++ b/pkg/controller/backup_sync_controller_test.go @@ -429,6 +429,7 @@ var _ = Describe("Backup Sync Reconciler", func() { backupNames = append(backupNames, backup.backup.Name) backupStore.On("GetBackupMetadata", backup.backup.Name).Return(backup.backup, nil) backupStore.On("GetPodVolumeBackups", backup.backup.Name).Return(backup.podVolumeBackups, nil) + backupStore.On("BackupExists", "bucket-1", backup.backup.Name).Return(true, nil) } backupStore.On("ListBackups").Return(backupNames, nil) } @@ -724,4 +725,174 @@ var _ = Describe("Backup Sync Reconciler", func() { Expect(testObjList).To(BeEquivalentTo(locationList)) }) + + When("testing validateOwnerReferences", func() { + + testCases := []struct { + name string + backup *velerov1api.Backup + toCreate []ctrlClient.Object + expectedReferences []metav1.OwnerReference + }{ + { + name: "handles empty owner references", + backup: &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{}, + }, + }, + expectedReferences: []metav1.OwnerReference{}, + }, + { + name: "handles missing schedule", + backup: &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Schedule", + Name: "some name", + }, + }, + }, + }, + expectedReferences: []metav1.OwnerReference{}, + }, + { + name: "handles existing reference", + backup: &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Schedule", + Name: "existing-schedule", + }, + }, + Namespace: "test-namespace", + }, + }, + toCreate: []ctrlClient.Object{ + &velerov1api.Schedule{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: "existing-schedule", + Namespace: "test-namespace", + }, + }, + }, + expectedReferences: []metav1.OwnerReference{ + { + Kind: "Schedule", + Name: "existing-schedule", + }, + }, + }, + { + name: "handles existing mismatched UID", + backup: &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Schedule", + Name: "existing-schedule", + UID: "backup-UID", + }, + }, + Namespace: "test-namespace", + }, + }, + toCreate: []ctrlClient.Object{ + &velerov1api.Schedule{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-schedule", + Namespace: "test-namespace", + UID: "schedule-UID", + }, + }, + }, + expectedReferences: []metav1.OwnerReference{}, + }, + { + name: "handles multiple references", + backup: &velerov1api.Backup{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + { + Kind: "Schedule", + Name: "existing-schedule", + UID: "1", + }, + { + Kind: "Schedule", + Name: "missing-schedule", + UID: "2", + }, + { + Kind: "Schedule", + Name: "mismatched-uid-schedule", + UID: "3", + }, + { + Kind: "Schedule", + Name: "another-existing-schedule", + UID: "4", + }, + }, + Namespace: "test-namespace", + }, + }, + toCreate: []ctrlClient.Object{ + &velerov1api.Schedule{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-schedule", + Namespace: "test-namespace", + UID: "1", + }, + }, + &velerov1api.Schedule{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mismatched-uid-schedule", + Namespace: "test-namespace", + UID: "not-3", + }, + }, + &velerov1api.Schedule{ + ObjectMeta: metav1.ObjectMeta{ + Name: "another-existing-schedule", + Namespace: "test-namespace", + UID: "4", + }, + }, + }, + expectedReferences: []metav1.OwnerReference{ + { + Kind: "Schedule", + Name: "existing-schedule", + UID: "1", + }, + { + Kind: "Schedule", + Name: "another-existing-schedule", + UID: "4", + }, + }, + }, + } + for _, test := range testCases { + test := test + It(test.name, func() { + logger := velerotest.NewLogger() + b := backupSyncReconciler{ + client: ctrlfake.NewClientBuilder().Build(), + } + + //create all required schedules as needed. 
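+ // Each Schedule fixture in test.toCreate is persisted to the fake
+ // controller-runtime client below, so filterBackupOwnerReferences can resolve
+ // each owner reference by the backup namespace and schedule name and compare
+ // UIDs, mirroring the lookup the reconciler performs against the API server
+ // while syncing backups from object storage.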
+ for _, creatable := range test.toCreate { + err := b.client.Create(context.Background(), creatable) + Expect(err).ShouldNot(HaveOccurred()) + } + + references := b.filterBackupOwnerReferences(context.Background(), test.backup, logger) + Expect(references).To(BeEquivalentTo(test.expectedReferences)) + }) + } + }) }) diff --git a/pkg/controller/data_download_controller.go b/pkg/controller/data_download_controller.go index cc60cacef..bf4299ea4 100644 --- a/pkg/controller/data_download_controller.go +++ b/pkg/controller/data_download_controller.go @@ -69,7 +69,7 @@ type DataDownloadReconciler struct { metrics *metrics.ServerMetrics } -func NewDataDownloadReconciler(client client.Client, kubeClient kubernetes.Interface, +func NewDataDownloadReconciler(client client.Client, kubeClient kubernetes.Interface, dataPathMgr *datapath.Manager, repoEnsurer *repository.Ensurer, credentialGetter *credentials.CredentialGetter, nodeName string, preparingTimeout time.Duration, logger logrus.FieldLogger, metrics *metrics.ServerMetrics) *DataDownloadReconciler { return &DataDownloadReconciler{ client: client, @@ -81,7 +81,7 @@ func NewDataDownloadReconciler(client client.Client, kubeClient kubernetes.Inter nodeName: nodeName, repositoryEnsurer: repoEnsurer, restoreExposer: exposer.NewGenericRestoreExposer(kubeClient, logger), - dataPathMgr: datapath.NewManager(1), + dataPathMgr: dataPathMgr, preparingTimeout: preparingTimeout, metrics: metrics, } @@ -287,12 +287,13 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request } else if dd.Status.Phase == velerov2alpha1api.DataDownloadPhaseInProgress { log.Info("Data download is in progress") if dd.Spec.Cancel { + log.Info("Data download is being canceled") fsRestore := r.dataPathMgr.GetAsyncBR(dd.Name) if fsRestore == nil { + r.OnDataDownloadCancelled(ctx, dd.GetNamespace(), dd.GetName()) return ctrl.Result{}, nil } - log.Info("Data download is being canceled") // Update status to Canceling. original := dd.DeepCopy() dd.Status.Phase = velerov2alpha1api.DataDownloadPhaseCanceling @@ -300,7 +301,6 @@ func (r *DataDownloadReconciler) Reconcile(ctx context.Context, req ctrl.Request log.WithError(err).Error("error updating data download status") return ctrl.Result{}, err } - fsRestore.Cancel() return ctrl.Result{}, nil } diff --git a/pkg/controller/data_download_controller_test.go b/pkg/controller/data_download_controller_test.go index 46162f0cb..de9fa7516 100644 --- a/pkg/controller/data_download_controller_test.go +++ b/pkg/controller/data_download_controller_test.go @@ -142,7 +142,10 @@ func initDataDownloadReconcilerWithError(objects []runtime.Object, needError ... 
if err != nil { return nil, err } - return NewDataDownloadReconciler(fakeClient, fakeKubeClient, nil, &credentials.CredentialGetter{FromFile: credentialFileStore}, "test_node", time.Minute*5, velerotest.NewLogger(), metrics.NewServerMetrics()), nil + + dataPathMgr := datapath.NewManager(1) + + return NewDataDownloadReconciler(fakeClient, fakeKubeClient, dataPathMgr, nil, &credentials.CredentialGetter{FromFile: credentialFileStore}, "test_node", time.Minute*5, velerotest.NewLogger(), metrics.NewServerMetrics()), nil } func TestDataDownloadReconcile(t *testing.T) { diff --git a/pkg/controller/data_upload_controller.go b/pkg/controller/data_upload_controller.go index 8371e0a11..feb1e9866 100644 --- a/pkg/controller/data_upload_controller.go +++ b/pkg/controller/data_upload_controller.go @@ -79,8 +79,8 @@ type DataUploadReconciler struct { metrics *metrics.ServerMetrics } -func NewDataUploadReconciler(client client.Client, kubeClient kubernetes.Interface, - csiSnapshotClient snapshotter.SnapshotV1Interface, repoEnsurer *repository.Ensurer, clock clocks.WithTickerAndDelayedExecution, +func NewDataUploadReconciler(client client.Client, kubeClient kubernetes.Interface, csiSnapshotClient snapshotter.SnapshotV1Interface, + dataPathMgr *datapath.Manager, repoEnsurer *repository.Ensurer, clock clocks.WithTickerAndDelayedExecution, cred *credentials.CredentialGetter, nodeName string, fs filesystem.Interface, preparingTimeout time.Duration, log logrus.FieldLogger, metrics *metrics.ServerMetrics) *DataUploadReconciler { return &DataUploadReconciler{ client: client, @@ -93,7 +93,7 @@ func NewDataUploadReconciler(client client.Client, kubeClient kubernetes.Interfa logger: log, repoEnsurer: repoEnsurer, snapshotExposerList: map[velerov2alpha1api.SnapshotType]exposer.SnapshotExposer{velerov2alpha1api.SnapshotTypeCSI: exposer.NewCSISnapshotExposer(kubeClient, csiSnapshotClient, log)}, - dataPathMgr: datapath.NewManager(1), + dataPathMgr: dataPathMgr, preparingTimeout: preparingTimeout, metrics: metrics, } @@ -292,13 +292,15 @@ func (r *DataUploadReconciler) Reconcile(ctx context.Context, req ctrl.Request) } else if du.Status.Phase == velerov2alpha1api.DataUploadPhaseInProgress { log.Info("Data upload is in progress") if du.Spec.Cancel { - fsBackup := r.dataPathMgr.GetAsyncBR(du.Name) - if fsBackup == nil { - return ctrl.Result{}, nil - } log.Info("Data upload is being canceled") - // Update status to Canceling. 
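+ // Note on the reordered cancel handling that follows: the "being canceled"
+ // log now fires before the dataPathMgr lookup, and when no async backup is
+ // tracked (fsBackup == nil, e.g. because the node-agent pod restarted and
+ // lost its in-memory datapath.Manager state) the DataUpload is handed to
+ // OnDataUploadCancelled instead of being silently skipped; the early return
+ // removed above left such a DataUpload InProgress with no way to make progress.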
+ fsBackup := r.dataPathMgr.GetAsyncBR(du.Name) + if fsBackup == nil { + r.OnDataUploadCancelled(ctx, du.GetNamespace(), du.GetName()) + return ctrl.Result{}, nil + } + + // Update status to Canceling original := du.DeepCopy() du.Status.Phase = velerov2alpha1api.DataUploadPhaseCanceling if err := r.client.Patch(ctx, du, client.MergeFrom(original)); err != nil { @@ -761,7 +763,9 @@ func (r *DataUploadReconciler) setupExposeParam(du *velerov2alpha1api.DataUpload StorageClass: du.Spec.CSISnapshot.StorageClass, HostingPodLabels: map[string]string{velerov1api.DataUploadLabel: du.Name}, AccessMode: accessMode, - Timeout: du.Spec.OperationTimeout.Duration, + OperationTimeout: du.Spec.OperationTimeout.Duration, + ExposeTimeout: r.preparingTimeout, + VolumeSize: pvc.Spec.Resources.Requests[corev1.ResourceStorage], }, nil } return nil, nil diff --git a/pkg/controller/data_upload_controller_test.go b/pkg/controller/data_upload_controller_test.go index b74e8b1e3..fd35cfe7d 100644 --- a/pkg/controller/data_upload_controller_test.go +++ b/pkg/controller/data_upload_controller_test.go @@ -163,6 +163,8 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci Spec: appsv1.DaemonSetSpec{}, } + dataPathMgr := datapath.NewManager(1) + now, err := time.Parse(time.RFC1123, time.RFC1123) if err != nil { return nil, err @@ -218,7 +220,7 @@ func initDataUploaderReconcilerWithError(needError ...error) (*DataUploadReconci if err != nil { return nil, err } - return NewDataUploadReconciler(fakeClient, fakeKubeClient, fakeSnapshotClient.SnapshotV1(), nil, + return NewDataUploadReconciler(fakeClient, fakeKubeClient, fakeSnapshotClient.SnapshotV1(), dataPathMgr, nil, testclocks.NewFakeClock(now), &credentials.CredentialGetter{FromFile: credentialFileStore}, "test_node", fakeFS, time.Minute*5, velerotest.NewLogger(), metrics.NewServerMetrics()), nil } diff --git a/pkg/controller/pod_volume_backup_controller.go b/pkg/controller/pod_volume_backup_controller.go index e54be8177..cebe777c1 100644 --- a/pkg/controller/pod_volume_backup_controller.go +++ b/pkg/controller/pod_volume_backup_controller.go @@ -47,7 +47,7 @@ import ( const pVBRRequestor string = "pod-volume-backup-restore" // NewPodVolumeBackupReconciler creates the PodVolumeBackupReconciler instance -func NewPodVolumeBackupReconciler(client client.Client, ensurer *repository.Ensurer, credentialGetter *credentials.CredentialGetter, +func NewPodVolumeBackupReconciler(client client.Client, dataPathMgr *datapath.Manager, ensurer *repository.Ensurer, credentialGetter *credentials.CredentialGetter, nodeName string, scheme *runtime.Scheme, metrics *metrics.ServerMetrics, logger logrus.FieldLogger) *PodVolumeBackupReconciler { return &PodVolumeBackupReconciler{ Client: client, @@ -59,7 +59,7 @@ func NewPodVolumeBackupReconciler(client client.Client, ensurer *repository.Ensu clock: &clocks.RealClock{}, scheme: scheme, metrics: metrics, - dataPathMgr: datapath.NewManager(1), + dataPathMgr: dataPathMgr, } } diff --git a/pkg/controller/pod_volume_restore_controller.go b/pkg/controller/pod_volume_restore_controller.go index d48925710..34d6c6551 100644 --- a/pkg/controller/pod_volume_restore_controller.go +++ b/pkg/controller/pod_volume_restore_controller.go @@ -50,7 +50,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/filesystem" ) -func NewPodVolumeRestoreReconciler(client client.Client, ensurer *repository.Ensurer, +func NewPodVolumeRestoreReconciler(client client.Client, dataPathMgr *datapath.Manager, ensurer *repository.Ensurer, 
credentialGetter *credentials.CredentialGetter, logger logrus.FieldLogger) *PodVolumeRestoreReconciler { return &PodVolumeRestoreReconciler{ Client: client, @@ -59,7 +59,7 @@ func NewPodVolumeRestoreReconciler(client client.Client, ensurer *repository.Ens credentialGetter: credentialGetter, fileSystem: filesystem.NewFileSystem(), clock: &clocks.RealClock{}, - dataPathMgr: datapath.NewManager(1), + dataPathMgr: dataPathMgr, } } diff --git a/pkg/controller/restore_controller.go b/pkg/controller/restore_controller.go index 0e11d7ced..f6b9b39d9 100644 --- a/pkg/controller/restore_controller.go +++ b/pkg/controller/restore_controller.go @@ -25,6 +25,7 @@ import ( "io" "os" "sort" + "strings" "time" "github.com/pkg/errors" @@ -376,7 +377,7 @@ func (r *restoreReconciler) validateAndComplete(restore *api.Restore) (backupInf } var resourceModifiers *resourcemodifiers.ResourceModifiers = nil - if restore.Spec.ResourceModifier != nil && restore.Spec.ResourceModifier.Kind == resourcemodifiers.ConfigmapRefType { + if restore.Spec.ResourceModifier != nil && strings.EqualFold(restore.Spec.ResourceModifier.Kind, resourcemodifiers.ConfigmapRefType) { ResourceModifierConfigMap := &corev1api.ConfigMap{} err := r.kbClient.Get(context.Background(), client.ObjectKey{Namespace: restore.Namespace, Name: restore.Spec.ResourceModifier.Name}, ResourceModifierConfigMap) if err != nil { @@ -514,6 +515,11 @@ func (r *restoreReconciler) runValidatedRestore(restore *api.Restore, info backu return errors.Wrap(err, "error fetching volume snapshots metadata") } + csiVolumeSnapshots, err := backupStore.GetCSIVolumeSnapshots(restore.Spec.BackupName) + if err != nil { + return errors.Wrap(err, "fail to fetch CSI VolumeSnapshots metadata") + } + restoreLog.Info("starting restore") var podVolumeBackups []*api.PodVolumeBackup @@ -530,6 +536,7 @@ func (r *restoreReconciler) runValidatedRestore(restore *api.Restore, info backu BackupReader: backupFile, ResourceModifiers: resourceModifiers, DisableInformerCache: r.disableInformerCache, + CSIVolumeSnapshots: csiVolumeSnapshots, } restoreWarnings, restoreErrors := r.restorer.RestoreWithResolvers(restoreReq, actionsResolver, pluginManager) diff --git a/pkg/controller/restore_controller_test.go b/pkg/controller/restore_controller_test.go index e3ffaf4df..9437f1d1c 100644 --- a/pkg/controller/restore_controller_test.go +++ b/pkg/controller/restore_controller_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -471,6 +472,7 @@ func TestRestoreReconcile(t *testing.T) { } if test.expectedRestorerCall != nil { backupStore.On("GetBackupContents", test.backup.Name).Return(io.NopCloser(bytes.NewReader([]byte("hello world"))), nil) + backupStore.On("GetCSIVolumeSnapshots", test.backup.Name).Return([]*snapshotv1api.VolumeSnapshot{}, nil) restorer.On("RestoreWithResolvers", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(warnings, errors) @@ -781,7 +783,8 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) { Spec: velerov1api.RestoreSpec{ BackupName: "backup-1", ResourceModifier: &corev1.TypedLocalObjectReference{ - Kind: resourcemodifiers.ConfigmapRefType, + // intentional to ensure case insensitivity works as expected + Kind: "confIGMaP", Name: "test-configmap-invalid", }, }, diff --git 
a/pkg/discovery/helper_test.go b/pkg/discovery/helper_test.go index cec92a8f2..3091676a2 100644 --- a/pkg/discovery/helper_test.go +++ b/pkg/discovery/helper_test.go @@ -30,7 +30,6 @@ import ( clientgotesting "k8s.io/client-go/testing" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - discoverymocks "github.com/vmware-tanzu/velero/pkg/discovery/mocks" "github.com/vmware-tanzu/velero/pkg/features" velerotest "github.com/vmware-tanzu/velero/pkg/test" "github.com/vmware-tanzu/velero/pkg/util/logging" @@ -548,39 +547,31 @@ func TestHelper_refreshServerPreferredResources(t *testing.T) { } tests := []struct { - name string - isGetResError bool + name string + expectedErr error }{ { - name: "success get preferred resources", + name: "success get preferred resources", + expectedErr: nil, }, { - name: "failed to get preferred resources", - isGetResError: true, + name: "failed to get preferred resources", + expectedErr: errors.New("Failed to discover preferred resources"), }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - fakeClient := discoverymocks.NewServerResourcesInterface(t) - - if tc.isGetResError { - fakeClient.On("ServerPreferredResources").Return(nil, errors.New("Failed to discover preferred resources")) - } else { - fakeClient.On("ServerPreferredResources").Return(apiList, nil) - } + fakeClient := velerotest.NewFakeServerResourcesInterface(apiList, []*metav1.APIGroup{}, map[schema.GroupVersion]error{}, tc.expectedErr) resources, err := refreshServerPreferredResources(fakeClient, logrus.New()) - if tc.isGetResError { + if tc.expectedErr != nil { assert.NotNil(t, err) - assert.Nil(t, resources) } else { assert.Nil(t, err) assert.NotNil(t, resources) } - - fakeClient.AssertExpectations(t) }) } } @@ -612,41 +603,31 @@ func TestHelper_refreshServerGroupsAndResources(t *testing.T) { }, } tests := []struct { - name string - isGetResError bool + name string + expectedErr error }{ { name: "success get service groups and resouorces", }, { - name: "failed to service groups and resouorces", - isGetResError: true, + name: "failed to service groups and resouorces", + expectedErr: errors.New("Failed to discover service groups and resouorces"), }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - fakeClient := discoverymocks.NewServerResourcesInterface(t) - - if tc.isGetResError { - fakeClient.On("ServerGroupsAndResources").Return(nil, nil, errors.New("Failed to discover service groups and resouorces")) - } else { - fakeClient.On("ServerGroupsAndResources").Return(apiGroup, apiList, nil) - } + fakeClient := velerotest.NewFakeServerResourcesInterface(apiList, apiGroup, map[schema.GroupVersion]error{}, tc.expectedErr) serverGroups, serverResources, err := refreshServerGroupsAndResources(fakeClient, logrus.New()) - if tc.isGetResError { + if tc.expectedErr != nil { assert.NotNil(t, err) - assert.Nil(t, serverGroups) - assert.Nil(t, serverResources) } else { assert.Nil(t, err) assert.NotNil(t, serverGroups) assert.NotNil(t, serverResources) } - - fakeClient.AssertExpectations(t) }) } } diff --git a/pkg/discovery/mocks/Helper.go b/pkg/discovery/mocks/Helper.go index f0dbcfc03..3f73f67be 100644 --- a/pkg/discovery/mocks/Helper.go +++ b/pkg/discovery/mocks/Helper.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.30.1. DO NOT EDIT. +// Code generated by mockery v2.20.0. DO NOT EDIT. package mocks @@ -140,12 +140,13 @@ func (_m *Helper) ServerVersion() *version.Info { return r0 } -// NewHelper creates a new instance of Helper. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewHelper(t interface { +type mockConstructorTestingTNewHelper interface { mock.TestingT Cleanup(func()) -}) *Helper { +} + +// NewHelper creates a new instance of Helper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewHelper(t mockConstructorTestingTNewHelper) *Helper { mock := &Helper{} mock.Mock.Test(t) @@ -153,84 +154,3 @@ func NewHelper(t interface { return mock } - -// serverResourcesInterface is an autogenerated mock type for the serverResourcesInterface type -type serverResourcesInterface struct { - mock.Mock -} - -// ServerGroupsAndResources provides a mock function with given fields: -func (_m *serverResourcesInterface) ServerGroupsAndResources() ([]*v1.APIGroup, []*v1.APIResourceList, error) { - ret := _m.Called() - - var r0 []*v1.APIGroup - var r1 []*v1.APIResourceList - var r2 error - if rf, ok := ret.Get(0).(func() ([]*v1.APIGroup, []*v1.APIResourceList, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() []*v1.APIGroup); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*v1.APIGroup) - } - } - - if rf, ok := ret.Get(1).(func() []*v1.APIResourceList); ok { - r1 = rf() - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).([]*v1.APIResourceList) - } - } - - if rf, ok := ret.Get(2).(func() error); ok { - r2 = rf() - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// ServerPreferredResources provides a mock function with given fields: -func (_m *serverResourcesInterface) ServerPreferredResources() ([]*v1.APIResourceList, error) { - ret := _m.Called() - - var r0 []*v1.APIResourceList - var r1 error - if rf, ok := ret.Get(0).(func() ([]*v1.APIResourceList, error)); ok { - return rf() - } - if rf, ok := ret.Get(0).(func() []*v1.APIResourceList); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*v1.APIResourceList) - } - } - - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -type mockConstructorTestingTnewServerResourcesInterface interface { - mock.TestingT - Cleanup(func()) -} - -// NewServerResourcesInterface creates a new instance of serverResourcesInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewServerResourcesInterface(t mockConstructorTestingTnewServerResourcesInterface) *serverResourcesInterface { - mock := &serverResourcesInterface{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/pkg/exposer/csi_snapshot.go b/pkg/exposer/csi_snapshot.go index 85f511524..9979c5fdd 100644 --- a/pkg/exposer/csi_snapshot.go +++ b/pkg/exposer/csi_snapshot.go @@ -59,8 +59,14 @@ type CSISnapshotExposeParam struct { // HostingPodLabels is the labels that are going to apply to the hosting pod HostingPodLabels map[string]string - // Timeout specifies the time wait for resources operations in Expose - Timeout time.Duration + // OperationTimeout specifies the time wait for resources operations in Expose + OperationTimeout time.Duration + + // ExposeTimeout specifies the timeout for the entire expose process + ExposeTimeout time.Duration + + // VolumeSize specifies the size of the source volume + VolumeSize resource.Quantity } // CSISnapshotExposeWaitParam define the input param for WaitExposed of CSI snapshots @@ -94,7 +100,7 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1.Obje curLog.Info("Exposing CSI snapshot") - volumeSnapshot, err := csi.WaitVolumeSnapshotReady(ctx, e.csiSnapshotClient, csiExposeParam.SnapshotName, csiExposeParam.SourceNamespace, csiExposeParam.Timeout, curLog) + volumeSnapshot, err := csi.WaitVolumeSnapshotReady(ctx, e.csiSnapshotClient, csiExposeParam.SnapshotName, csiExposeParam.SourceNamespace, csiExposeParam.ExposeTimeout, curLog) if err != nil { return errors.Wrapf(err, "error wait volume snapshot ready") } @@ -121,14 +127,21 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1.Obje } }() - err = csi.EnsureDeleteVS(ctx, e.csiSnapshotClient, volumeSnapshot.Name, volumeSnapshot.Namespace, csiExposeParam.Timeout) + err = csi.EnsureDeleteVS(ctx, e.csiSnapshotClient, volumeSnapshot.Name, volumeSnapshot.Namespace, csiExposeParam.OperationTimeout) if err != nil { return errors.Wrap(err, "error to delete volume snapshot") } curLog.WithField("vs name", volumeSnapshot.Name).Infof("VS is deleted in namespace %s", volumeSnapshot.Namespace) - err = csi.EnsureDeleteVSC(ctx, e.csiSnapshotClient, vsc.Name, csiExposeParam.Timeout) + err = csi.RemoveVSCProtect(ctx, e.csiSnapshotClient, vsc.Name, csiExposeParam.ExposeTimeout) + if err != nil { + return errors.Wrap(err, "error to remove protect from volume snapshot content") + } + + curLog.WithField("vsc name", vsc.Name).Infof("Removed protect from VSC") + + err = csi.EnsureDeleteVSC(ctx, e.csiSnapshotClient, vsc.Name, csiExposeParam.OperationTimeout) if err != nil { return errors.Wrap(err, "error to delete volume snapshot content") } @@ -156,7 +169,15 @@ func (e *csiSnapshotExposer) Expose(ctx context.Context, ownerObject corev1.Obje curLog.WithField("vsc name", backupVSC.Name).Infof("Backup VSC is created from %s", vsc.Name) - backupPVC, err := e.createBackupPVC(ctx, ownerObject, backupVS.Name, csiExposeParam.StorageClass, csiExposeParam.AccessMode, *volumeSnapshot.Status.RestoreSize) + var volumeSize resource.Quantity + if volumeSnapshot.Status.RestoreSize != nil && !volumeSnapshot.Status.RestoreSize.IsZero() { + volumeSize = *volumeSnapshot.Status.RestoreSize + } else { + volumeSize = csiExposeParam.VolumeSize + curLog.WithField("vs name", volumeSnapshot.Name).Warnf("The snapshot doesn't contain a valid restore size, use source volume's size %v", volumeSize) + } + + backupPVC, err := e.createBackupPVC(ctx, ownerObject, 
backupVS.Name, csiExposeParam.StorageClass, csiExposeParam.AccessMode, volumeSize) if err != nil { return errors.Wrap(err, "error to create backup pvc") } @@ -190,6 +211,7 @@ func (e *csiSnapshotExposer) GetExposed(ctx context.Context, ownerObject corev1. backupPodName := ownerObject.Name backupPVCName := ownerObject.Name + volumeName := string(ownerObject.UID) curLog := e.log.WithFields(logrus.Fields{ "owner": ownerObject.Name, @@ -218,7 +240,20 @@ func (e *csiSnapshotExposer) GetExposed(ctx context.Context, ownerObject corev1. curLog.WithField("backup pvc", backupPVCName).Info("Backup PVC is bound") - return &ExposeResult{ByPod: ExposeByPod{HostingPod: pod, VolumeName: pod.Spec.Volumes[0].Name}}, nil + i := 0 + for i = 0; i < len(pod.Spec.Volumes); i++ { + if pod.Spec.Volumes[i].Name == volumeName { + break + } + } + + if i == len(pod.Spec.Volumes) { + return nil, errors.Errorf("backup pod %s doesn't have the expected backup volume", pod.Name) + } + + curLog.WithField("pod", pod.Name).Infof("Backup volume is found in pod at index %v", i) + + return &ExposeResult{ByPod: ExposeByPod{HostingPod: pod, VolumeName: volumeName}}, nil } func (e *csiSnapshotExposer) CleanUp(ctx context.Context, ownerObject corev1.ObjectReference, vsName string, sourceNamespace string) { diff --git a/pkg/exposer/csi_snapshot_test.go b/pkg/exposer/csi_snapshot_test.go index 7ea6d5bf0..0caf3f4c8 100644 --- a/pkg/exposer/csi_snapshot_test.go +++ b/pkg/exposer/csi_snapshot_test.go @@ -37,6 +37,8 @@ import ( velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" "github.com/vmware-tanzu/velero/pkg/util/boolptr" + + clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake" ) type reactor struct { @@ -59,6 +61,8 @@ func TestExpose(t *testing.T) { }, } + var restoreSize int64 = 123456 + snapshotClass := "fake-snapshot-class" vsObject := &snapshotv1api.VolumeSnapshot{ ObjectMeta: metav1.ObjectMeta{ @@ -78,11 +82,31 @@ func TestExpose(t *testing.T) { Status: &snapshotv1api.VolumeSnapshotStatus{ BoundVolumeSnapshotContentName: &vscName, ReadyToUse: boolptr.True(), - RestoreSize: &resource.Quantity{}, + RestoreSize: resource.NewQuantity(restoreSize, ""), + }, + } + + vsObjectWithoutRestoreSize := &snapshotv1api.VolumeSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-vs", + Namespace: "fake-ns", + Annotations: map[string]string{ + "fake-key-1": "fake-value-1", + "fake-key-2": "fake-value-2", + }, + }, + Spec: snapshotv1api.VolumeSnapshotSpec{ + Source: snapshotv1api.VolumeSnapshotSource{ + VolumeSnapshotContentName: &vscName, + }, + VolumeSnapshotClassName: &snapshotClass, + }, + Status: &snapshotv1api.VolumeSnapshotStatus{ + BoundVolumeSnapshotContentName: &vscName, + ReadyToUse: boolptr.True(), }, } - var restoreSize int64 snapshotHandle := "fake-handle" vscObj := &snapshotv1api.VolumeSnapshotContent{ ObjectMeta: metav1.ObjectMeta{ @@ -116,21 +140,23 @@ func TestExpose(t *testing.T) { } tests := []struct { - name string - snapshotClientObj []runtime.Object - kubeClientObj []runtime.Object - ownerBackup *velerov1.Backup - exposeParam CSISnapshotExposeParam - snapReactors []reactor - kubeReactors []reactor - err string + name string + snapshotClientObj []runtime.Object + kubeClientObj []runtime.Object + ownerBackup *velerov1.Backup + exposeParam CSISnapshotExposeParam + snapReactors []reactor + kubeReactors []reactor + err string + expectedVolumeSize *resource.Quantity }{ { name: "wait vs ready fail", ownerBackup: backup, exposeParam: CSISnapshotExposeParam{ - 
SnapshotName: "fake-vs", - Timeout: time.Millisecond, + SnapshotName: "fake-vs", + OperationTimeout: time.Millisecond, + ExposeTimeout: time.Millisecond, }, err: "error wait volume snapshot ready: error to get volumesnapshot /fake-vs: volumesnapshots.snapshot.storage.k8s.io \"fake-vs\" not found", }, @@ -138,9 +164,10 @@ func TestExpose(t *testing.T) { name: "get vsc fail", ownerBackup: backup, exposeParam: CSISnapshotExposeParam{ - SnapshotName: "fake-vs", - SourceNamespace: "fake-ns", - Timeout: time.Millisecond, + SnapshotName: "fake-vs", + SourceNamespace: "fake-ns", + OperationTimeout: time.Millisecond, + ExposeTimeout: time.Millisecond, }, snapshotClientObj: []runtime.Object{ vsObject, @@ -151,9 +178,10 @@ func TestExpose(t *testing.T) { name: "delete vs fail", ownerBackup: backup, exposeParam: CSISnapshotExposeParam{ - SnapshotName: "fake-vs", - SourceNamespace: "fake-ns", - Timeout: time.Millisecond, + SnapshotName: "fake-vs", + SourceNamespace: "fake-ns", + OperationTimeout: time.Millisecond, + ExposeTimeout: time.Millisecond, }, snapshotClientObj: []runtime.Object{ vsObject, @@ -174,9 +202,10 @@ func TestExpose(t *testing.T) { name: "delete vsc fail", ownerBackup: backup, exposeParam: CSISnapshotExposeParam{ - SnapshotName: "fake-vs", - SourceNamespace: "fake-ns", - Timeout: time.Millisecond, + SnapshotName: "fake-vs", + SourceNamespace: "fake-ns", + OperationTimeout: time.Millisecond, + ExposeTimeout: time.Millisecond, }, snapshotClientObj: []runtime.Object{ vsObject, @@ -197,9 +226,10 @@ func TestExpose(t *testing.T) { name: "create backup vs fail", ownerBackup: backup, exposeParam: CSISnapshotExposeParam{ - SnapshotName: "fake-vs", - SourceNamespace: "fake-ns", - Timeout: time.Millisecond, + SnapshotName: "fake-vs", + SourceNamespace: "fake-ns", + OperationTimeout: time.Millisecond, + ExposeTimeout: time.Millisecond, }, snapshotClientObj: []runtime.Object{ vsObject, @@ -220,9 +250,10 @@ func TestExpose(t *testing.T) { name: "create backup vsc fail", ownerBackup: backup, exposeParam: CSISnapshotExposeParam{ - SnapshotName: "fake-vs", - SourceNamespace: "fake-ns", - Timeout: time.Millisecond, + SnapshotName: "fake-vs", + SourceNamespace: "fake-ns", + OperationTimeout: time.Millisecond, + ExposeTimeout: time.Millisecond, }, snapshotClientObj: []runtime.Object{ vsObject, @@ -257,10 +288,11 @@ func TestExpose(t *testing.T) { name: "create backup pvc fail", ownerBackup: backup, exposeParam: CSISnapshotExposeParam{ - SnapshotName: "fake-vs", - SourceNamespace: "fake-ns", - Timeout: time.Millisecond, - AccessMode: AccessModeFileSystem, + SnapshotName: "fake-vs", + SourceNamespace: "fake-ns", + OperationTimeout: time.Millisecond, + ExposeTimeout: time.Millisecond, + AccessMode: AccessModeFileSystem, }, snapshotClientObj: []runtime.Object{ vsObject, @@ -281,10 +313,11 @@ func TestExpose(t *testing.T) { name: "create backup pod fail", ownerBackup: backup, exposeParam: CSISnapshotExposeParam{ - SnapshotName: "fake-vs", - SourceNamespace: "fake-ns", - AccessMode: AccessModeFileSystem, - Timeout: time.Millisecond, + SnapshotName: "fake-vs", + SourceNamespace: "fake-ns", + AccessMode: AccessModeFileSystem, + OperationTimeout: time.Millisecond, + ExposeTimeout: time.Millisecond, }, snapshotClientObj: []runtime.Object{ vsObject, @@ -308,10 +341,11 @@ func TestExpose(t *testing.T) { name: "success", ownerBackup: backup, exposeParam: CSISnapshotExposeParam{ - SnapshotName: "fake-vs", - SourceNamespace: "fake-ns", - AccessMode: AccessModeFileSystem, - Timeout: time.Millisecond, + SnapshotName: 
"fake-vs", + SourceNamespace: "fake-ns", + AccessMode: AccessModeFileSystem, + OperationTimeout: time.Millisecond, + ExposeTimeout: time.Millisecond, }, snapshotClientObj: []runtime.Object{ vsObject, @@ -321,6 +355,26 @@ func TestExpose(t *testing.T) { daemonSet, }, }, + { + name: "restore size from exposeParam", + ownerBackup: backup, + exposeParam: CSISnapshotExposeParam{ + SnapshotName: "fake-vs", + SourceNamespace: "fake-ns", + AccessMode: AccessModeFileSystem, + OperationTimeout: time.Millisecond, + ExposeTimeout: time.Millisecond, + VolumeSize: *resource.NewQuantity(567890, ""), + }, + snapshotClientObj: []runtime.Object{ + vsObjectWithoutRestoreSize, + vscObj, + }, + kubeClientObj: []runtime.Object{ + daemonSet, + }, + expectedVolumeSize: resource.NewQuantity(567890, ""), + }, } for _, test := range tests { @@ -360,7 +414,7 @@ func TestExpose(t *testing.T) { _, err = exposer.kubeClient.CoreV1().Pods(ownerObject.Namespace).Get(context.Background(), ownerObject.Name, metav1.GetOptions{}) assert.NoError(t, err) - _, err = exposer.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(context.Background(), ownerObject.Name, metav1.GetOptions{}) + backupPVC, err := exposer.kubeClient.CoreV1().PersistentVolumeClaims(ownerObject.Namespace).Get(context.Background(), ownerObject.Name, metav1.GetOptions{}) assert.NoError(t, err) expectedVS, err := exposer.csiSnapshotClient.VolumeSnapshots(ownerObject.Namespace).Get(context.Background(), ownerObject.Name, metav1.GetOptions{}) @@ -377,6 +431,12 @@ func TestExpose(t *testing.T) { assert.Equal(t, expectedVSC.Spec.DeletionPolicy, vscObj.Spec.DeletionPolicy) assert.Equal(t, expectedVSC.Spec.Driver, vscObj.Spec.Driver) assert.Equal(t, *expectedVSC.Spec.VolumeSnapshotClassName, *vscObj.Spec.VolumeSnapshotClassName) + + if test.expectedVolumeSize != nil { + assert.Equal(t, *test.expectedVolumeSize, backupPVC.Spec.Resources.Requests[corev1.ResourceStorage]) + } else { + assert.Equal(t, *resource.NewQuantity(restoreSize, ""), backupPVC.Spec.Resources.Requests[corev1.ResourceStorage]) + } } else { assert.EqualError(t, err, test.err) } @@ -384,3 +444,180 @@ func TestExpose(t *testing.T) { }) } } + +func TestGetExpose(t *testing.T) { + backup := &velerov1.Backup{ + TypeMeta: metav1.TypeMeta{ + APIVersion: velerov1.SchemeGroupVersion.String(), + Kind: "Backup", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: velerov1.DefaultNamespace, + Name: "fake-backup", + UID: "fake-uid", + }, + } + + backupPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: backup.Namespace, + Name: backup.Name, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "fake-volume", + }, + { + Name: "fake-volume-2", + }, + { + Name: string(backup.UID), + }, + }, + }, + } + + backupPodWithoutVolume := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: backup.Namespace, + Name: backup.Name, + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "fake-volume-1", + }, + { + Name: "fake-volume-2", + }, + }, + }, + } + + backupPVC := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: backup.Namespace, + Name: backup.Name, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + VolumeName: "fake-pv-name", + }, + } + + backupPV := &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-pv-name", + }, + } + + scheme := runtime.NewScheme() + corev1.AddToScheme(scheme) + + tests := []struct { + name string + kubeClientObj []runtime.Object + ownerBackup *velerov1.Backup + exposeWaitParam 
CSISnapshotExposeWaitParam + Timeout time.Duration + err string + expectedResult *ExposeResult + }{ + { + name: "backup pod is not found", + ownerBackup: backup, + exposeWaitParam: CSISnapshotExposeWaitParam{ + NodeName: "fake-node", + }, + }, + { + name: "wait pvc bound fail", + ownerBackup: backup, + exposeWaitParam: CSISnapshotExposeWaitParam{ + NodeName: "fake-node", + }, + kubeClientObj: []runtime.Object{ + backupPod, + }, + Timeout: time.Second, + err: "error to wait backup PVC bound, fake-backup: error to wait for rediness of PVC: error to get pvc velero/fake-backup: persistentvolumeclaims \"fake-backup\" not found", + }, + { + name: "backup volume not found in pod", + ownerBackup: backup, + exposeWaitParam: CSISnapshotExposeWaitParam{ + NodeName: "fake-node", + }, + kubeClientObj: []runtime.Object{ + backupPodWithoutVolume, + backupPVC, + backupPV, + }, + Timeout: time.Second, + err: "backup pod fake-backup doesn't have the expected backup volume", + }, + { + name: "succeed", + ownerBackup: backup, + exposeWaitParam: CSISnapshotExposeWaitParam{ + NodeName: "fake-node", + }, + kubeClientObj: []runtime.Object{ + backupPod, + backupPVC, + backupPV, + }, + Timeout: time.Second, + expectedResult: &ExposeResult{ + ByPod: ExposeByPod{ + HostingPod: backupPod, + VolumeName: string(backup.UID), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...) + + fakeClientBuilder := clientFake.NewClientBuilder() + fakeClientBuilder = fakeClientBuilder.WithScheme(scheme) + + fakeClient := fakeClientBuilder.WithRuntimeObjects(test.kubeClientObj...).Build() + + exposer := csiSnapshotExposer{ + kubeClient: fakeKubeClient, + log: velerotest.NewLogger(), + } + + var ownerObject corev1.ObjectReference + if test.ownerBackup != nil { + ownerObject = corev1.ObjectReference{ + Kind: test.ownerBackup.Kind, + Namespace: test.ownerBackup.Namespace, + Name: test.ownerBackup.Name, + UID: test.ownerBackup.UID, + APIVersion: test.ownerBackup.APIVersion, + } + } + + test.exposeWaitParam.NodeClient = fakeClient + + result, err := exposer.GetExposed(context.Background(), ownerObject, test.Timeout, &test.exposeWaitParam) + if test.err == "" { + assert.NoError(t, err) + + if test.expectedResult == nil { + assert.Nil(t, result) + } else { + assert.NoError(t, err) + assert.Equal(t, test.expectedResult.ByPod.VolumeName, result.ByPod.VolumeName) + assert.Equal(t, test.expectedResult.ByPod.HostingPod.Name, result.ByPod.HostingPod.Name) + } + } else { + assert.EqualError(t, err, test.err) + } + }) + } +} diff --git a/pkg/features/mocks/PluginFinder.go b/pkg/features/mocks/PluginFinder.go new file mode 100644 index 000000000..7659c1772 --- /dev/null +++ b/pkg/features/mocks/PluginFinder.go @@ -0,0 +1,43 @@ +// Code generated by mockery v2.22.1. DO NOT EDIT. 
+ +package mocks + +import ( + common "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" + + mock "github.com/stretchr/testify/mock" +) + +// PluginFinder is an autogenerated mock type for the PluginFinder type +type PluginFinder struct { + mock.Mock +} + +// Find provides a mock function with given fields: kind, name +func (_m *PluginFinder) Find(kind common.PluginKind, name string) bool { + ret := _m.Called(kind, name) + + var r0 bool + if rf, ok := ret.Get(0).(func(common.PluginKind, string) bool); ok { + r0 = rf(kind, name) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +type mockConstructorTestingTNewPluginFinder interface { + mock.TestingT + Cleanup(func()) +} + +// NewPluginFinder creates a new instance of PluginFinder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewPluginFinder(t mockConstructorTestingTNewPluginFinder) *PluginFinder { + mock := &PluginFinder{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/features/mocks/Verifier.go b/pkg/features/mocks/Verifier.go new file mode 100644 index 000000000..e391345a2 --- /dev/null +++ b/pkg/features/mocks/Verifier.go @@ -0,0 +1,49 @@ +// Code generated by mockery v2.22.1. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// Verifier is an autogenerated mock type for the Verifier type +type Verifier struct { + mock.Mock +} + +// Verify provides a mock function with given fields: name +func (_m *Verifier) Verify(name string) (bool, error) { + ret := _m.Called(name) + + var r0 bool + var r1 error + if rf, ok := ret.Get(0).(func(string) (bool, error)); ok { + return rf(name) + } + if rf, ok := ret.Get(0).(func(string) bool); ok { + r0 = rf(name) + } else { + r0 = ret.Get(0).(bool) + } + + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(name) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type mockConstructorTestingTNewVerifier interface { + mock.TestingT + Cleanup(func()) +} + +// NewVerifier creates a new instance of Verifier. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewVerifier(t mockConstructorTestingTNewVerifier) *Verifier { + mock := &Verifier{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/features/verify.go b/pkg/features/verify.go new file mode 100644 index 000000000..0474d5076 --- /dev/null +++ b/pkg/features/verify.go @@ -0,0 +1,71 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package features + +import ( + "errors" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" +) + +type PluginFinder interface { + Find(kind common.PluginKind, name string) bool +} + +type Verifier interface { + Verify(name string) (bool, error) +} + +type verifier struct { + finder PluginFinder +} + +func NewVerifier(finder PluginFinder) Verifier { + return &verifier{ + finder: finder, + } +} + +func (v *verifier) Verify(name string) (bool, error) { + enabled := IsEnabled(name) + + switch name { + case velerov1api.CSIFeatureFlag: + return verifyCSIFeature(v.finder, enabled) + default: + return false, nil + } +} + +func verifyCSIFeature(finder PluginFinder, enabled bool) (bool, error) { + installed := false + installed = finder.Find(common.PluginKindBackupItemActionV2, "velero.io/csi-pvc-backupper") + if installed { + installed = finder.Find(common.PluginKindRestoreItemActionV2, "velero.io/csi-pvc-restorer") + } + + if !enabled && installed { + return false, errors.New("CSI plugins are registered, but the EnableCSI feature is not enabled") + } else if enabled && !installed { + return false, errors.New("CSI feature is enabled, but CSI plugins are not registered") + } else if !enabled && !installed { + return false, nil + } else { + return true, nil + } +} diff --git a/pkg/features/verify_test.go b/pkg/features/verify_test.go new file mode 100644 index 000000000..fee6b2b2d --- /dev/null +++ b/pkg/features/verify_test.go @@ -0,0 +1,61 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package features + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + findermocks "github.com/vmware-tanzu/velero/pkg/features/mocks" +) + +func TestVerify(t *testing.T) { + NewFeatureFlagSet() + verifier := verifier{} + + finder := new(findermocks.PluginFinder) + finder.On("Find", mock.Anything, mock.Anything).Return(false) + verifier.finder = finder + ready, err := verifier.Verify("EnableCSI") + assert.Equal(t, false, ready) + assert.Nil(t, err) + + finder = new(findermocks.PluginFinder) + finder.On("Find", mock.Anything, mock.Anything).Return(true) + verifier.finder = finder + ready, err = verifier.Verify("EnableCSI") + assert.Equal(t, false, ready) + assert.EqualError(t, err, "CSI plugins are registered, but the EnableCSI feature is not enabled") + + Enable("EnableCSI") + finder = new(findermocks.PluginFinder) + finder.On("Find", mock.Anything, mock.Anything).Return(false) + verifier.finder = finder + ready, err = verifier.Verify("EnableCSI") + assert.Equal(t, false, ready) + assert.EqualError(t, err, "CSI feature is enabled, but CSI plugins are not registered") + + Enable("EnableCSI") + finder = new(findermocks.PluginFinder) + finder.On("Find", mock.Anything, mock.Anything).Return(true) + verifier.finder = finder + ready, err = verifier.Verify("EnableCSI") + assert.Equal(t, true, ready) + assert.Nil(t, err) +} diff --git a/pkg/nodeagent/node_agent.go b/pkg/nodeagent/node_agent.go index 83e76d2a4..ff93ed596 100644 --- a/pkg/nodeagent/node_agent.go +++ b/pkg/nodeagent/node_agent.go @@ -18,28 +18,52 @@ package nodeagent import ( "context" + "encoding/json" "fmt" "github.com/pkg/errors" v1 "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" - - "github.com/vmware-tanzu/velero/pkg/util/kube" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/kubernetes" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/vmware-tanzu/velero/pkg/util/kube" ) const ( // daemonSet is the name of the Velero node agent daemonset. - daemonSet = "node-agent" + daemonSet = "node-agent" + configName = "node-agent-configs" + dataPathConConfigName = "data-path-concurrency" ) var ( ErrDaemonSetNotFound = errors.New("daemonset not found") ) +type DataPathConcurrency struct { + // GlobalConfig specifies the concurrency number to all nodes for which per-node config is not specified + GlobalConfig int `json:"globalConfig,omitempty"` + + // PerNodeConfig specifies the concurrency number to nodes matched by rules + PerNodeConfig []RuledConfigs `json:"perNodeConfig,omitempty"` +} + +type RuledConfigs struct { + // NodeSelector specifies the label selector to match nodes + NodeSelector metav1.LabelSelector `json:"nodeSelector"` + + // Number specifies the number value associated to the matched nodes + Number int `json:"number"` +} + +type Configs struct { + // DataPathConcurrency is the config for data path concurrency per node. + DataPathConcurrency *DataPathConcurrency `json:"dataPathConcurrency,omitempty"` +} + // IsRunning checks if the node agent daemonset is running properly. 
If not, return the error found func IsRunning(ctx context.Context, kubeClient kubernetes.Interface, namespace string) error { if _, err := kubeClient.AppsV1().DaemonSets(namespace).Get(ctx, daemonSet, metav1.GetOptions{}); apierrors.IsNotFound(err) { @@ -52,12 +76,18 @@ func IsRunning(ctx context.Context, kubeClient kubernetes.Interface, namespace s } // IsRunningInNode checks if the node agent pod is running properly in a specified node. If not, return the error found -func IsRunningInNode(ctx context.Context, namespace string, nodeName string, podClient corev1client.PodsGetter) error { +func IsRunningInNode(ctx context.Context, namespace string, nodeName string, crClient ctrlclient.Client) error { if nodeName == "" { return errors.New("node name is empty") } - pods, err := podClient.Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("name=%s", daemonSet)}) + pods := new(v1.PodList) + parsedSelector, err := labels.Parse(fmt.Sprintf("name=%s", daemonSet)) + if err != nil { + return errors.Wrap(err, "fail to parse selector") + } + + err = crClient.List(ctx, pods, &ctrlclient.ListOptions{LabelSelector: parsedSelector}) if err != nil { return errors.Wrap(err, "failed to list daemonset pods") } @@ -83,3 +113,31 @@ func GetPodSpec(ctx context.Context, kubeClient kubernetes.Interface, namespace return &ds.Spec.Template.Spec, nil } + +func GetConfigs(ctx context.Context, namespace string, kubeClient kubernetes.Interface) (*Configs, error) { + cm, err := kubeClient.CoreV1().ConfigMaps(namespace).Get(ctx, configName, metav1.GetOptions{}) + if err != nil { + if apierrors.IsNotFound(err) { + return nil, nil + } else { + return nil, errors.Wrapf(err, "error to get node agent configs %s", configName) + } + } + + if cm.Data == nil { + return nil, errors.Errorf("data is not available in config map %s", configName) + } + + jsonString := "" + for _, v := range cm.Data { + jsonString = v + } + + configs := &Configs{} + err = json.Unmarshal([]byte(jsonString), configs) + if err != nil { + return nil, errors.Wrapf(err, "error to unmarshall configs from %s", configName) + } + + return configs, nil +} diff --git a/pkg/nodeagent/node_agent_test.go b/pkg/nodeagent/node_agent_test.go new file mode 100644 index 000000000..a18e45b14 --- /dev/null +++ b/pkg/nodeagent/node_agent_test.go @@ -0,0 +1,337 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nodeagent + +import ( + "context" + "testing" + + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + clientTesting "k8s.io/client-go/testing" + clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/vmware-tanzu/velero/pkg/builder" +) + +type reactor struct { + verb string + resource string + reactorFunc clientTesting.ReactionFunc +} + +func TestIsRunning(t *testing.T) { + daemonSet := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "fake-ns", + Name: "node-agent", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "DaemonSet", + }, + } + + tests := []struct { + name string + kubeClientObj []runtime.Object + namespace string + kubeReactors []reactor + expectErr string + }{ + { + name: "ds is not found", + namespace: "fake-ns", + expectErr: "daemonset not found", + }, + { + name: "ds get error", + namespace: "fake-ns", + kubeReactors: []reactor{ + { + verb: "get", + resource: "daemonsets", + reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("fake-get-error") + }, + }, + }, + expectErr: "fake-get-error", + }, + { + name: "succeed", + namespace: "fake-ns", + kubeClientObj: []runtime.Object{ + daemonSet, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...) + + for _, reactor := range test.kubeReactors { + fakeKubeClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc) + } + + err := IsRunning(context.TODO(), fakeKubeClient, test.namespace) + if test.expectErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, test.expectErr) + } + }) + } +} + +func TestIsRunningInNode(t *testing.T) { + scheme := runtime.NewScheme() + corev1.AddToScheme(scheme) + + nonNodeAgentPod := builder.ForPod("fake-ns", "fake-pod").Result() + nodeAgentPodNotRunning := builder.ForPod("fake-ns", "fake-pod").Labels(map[string]string{"name": "node-agent"}).Result() + nodeAgentPodRunning1 := builder.ForPod("fake-ns", "fake-pod-1").Labels(map[string]string{"name": "node-agent"}).Phase(corev1.PodRunning).Result() + nodeAgentPodRunning2 := builder.ForPod("fake-ns", "fake-pod-2").Labels(map[string]string{"name": "node-agent"}).Phase(corev1.PodRunning).Result() + nodeAgentPodRunning3 := builder.ForPod("fake-ns", "fake-pod-3"). + Labels(map[string]string{"name": "node-agent"}). + Phase(corev1.PodRunning). + NodeName("fake-node"). 
+ Result() + + tests := []struct { + name string + kubeClientObj []runtime.Object + nodeName string + expectErr string + }{ + { + name: "node name is empty", + expectErr: "node name is empty", + }, + { + name: "ds pod not found", + nodeName: "fake-node", + kubeClientObj: []runtime.Object{ + nonNodeAgentPod, + }, + expectErr: "daemonset pod not found in running state in node fake-node", + }, + { + name: "ds po are not all running", + nodeName: "fake-node", + kubeClientObj: []runtime.Object{ + nodeAgentPodNotRunning, + nodeAgentPodRunning1, + }, + expectErr: "daemonset pod not found in running state in node fake-node", + }, + { + name: "ds pods wrong node name", + nodeName: "fake-node", + kubeClientObj: []runtime.Object{ + nodeAgentPodNotRunning, + nodeAgentPodRunning1, + nodeAgentPodRunning2, + }, + expectErr: "daemonset pod not found in running state in node fake-node", + }, + { + name: "succeed", + nodeName: "fake-node", + kubeClientObj: []runtime.Object{ + nodeAgentPodNotRunning, + nodeAgentPodRunning1, + nodeAgentPodRunning2, + nodeAgentPodRunning3, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeClientBuilder := clientFake.NewClientBuilder() + fakeClientBuilder = fakeClientBuilder.WithScheme(scheme) + + fakeClient := fakeClientBuilder.WithRuntimeObjects(test.kubeClientObj...).Build() + + err := IsRunningInNode(context.TODO(), "", test.nodeName, fakeClient) + if test.expectErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, test.expectErr) + } + }) + } +} + +func TestGetPodSpec(t *testing.T) { + podSpec := corev1.PodSpec{ + NodeName: "fake-node", + } + + daemonSet := &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "fake-ns", + Name: "node-agent", + }, + TypeMeta: metav1.TypeMeta{ + Kind: "DaemonSet", + }, + Spec: appsv1.DaemonSetSpec{ + Template: corev1.PodTemplateSpec{ + Spec: podSpec, + }, + }, + } + + tests := []struct { + name string + kubeClientObj []runtime.Object + namespace string + expectErr string + expectSpec corev1.PodSpec + }{ + { + name: "ds is not found", + namespace: "fake-ns", + expectErr: "error to get node-agent daemonset: daemonsets.apps \"node-agent\" not found", + }, + { + name: "succeed", + namespace: "fake-ns", + kubeClientObj: []runtime.Object{ + daemonSet, + }, + expectSpec: podSpec, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...) 
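+ // GetPodSpec only fetches the node-agent DaemonSet and returns its pod
+ // template spec, so the error case above simply expects the wrapped
+ // "error to get node-agent daemonset" not-found message.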
+ + spec, err := GetPodSpec(context.TODO(), fakeKubeClient, test.namespace) + if test.expectErr == "" { + assert.NoError(t, err) + assert.Equal(t, *spec, test.expectSpec) + } else { + assert.EqualError(t, err, test.expectErr) + } + }) + } +} + +func TestGetConfigs(t *testing.T) { + cm := builder.ForConfigMap("fake-ns", "node-agent-configs").Result() + cmWithInvalidDataFormat := builder.ForConfigMap("fake-ns", "node-agent-configs").Data("fake-key", "wrong").Result() + cmWithoutCocurrentData := builder.ForConfigMap("fake-ns", "node-agent-configs").Data("fake-key", "{\"someothers\":{\"someother\": 10}}").Result() + cmWithValidData := builder.ForConfigMap("fake-ns", "node-agent-configs").Data("fake-key", "{\"dataPathConcurrency\":{\"globalConfig\": 5}}").Result() + + tests := []struct { + name string + kubeClientObj []runtime.Object + namespace string + kubeReactors []reactor + expectResult *Configs + expectErr string + }{ + { + name: "cm is not found", + namespace: "fake-ns", + }, + { + name: "cm get error", + namespace: "fake-ns", + kubeReactors: []reactor{ + { + verb: "get", + resource: "configmaps", + reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("fake-get-error") + }, + }, + }, + expectErr: "error to get node agent configs node-agent-configs: fake-get-error", + }, + { + name: "cm's data is nil", + namespace: "fake-ns", + kubeClientObj: []runtime.Object{ + cm, + }, + expectErr: "data is not available in config map node-agent-configs", + }, + { + name: "cm's data is with invalid format", + namespace: "fake-ns", + kubeClientObj: []runtime.Object{ + cmWithInvalidDataFormat, + }, + expectErr: "error to unmarshall configs from node-agent-configs: invalid character 'w' looking for beginning of value", + }, + { + name: "concurrency configs are not found", + namespace: "fake-ns", + kubeClientObj: []runtime.Object{ + cmWithoutCocurrentData, + }, + expectResult: &Configs{nil}, + }, + { + name: "success", + namespace: "fake-ns", + kubeClientObj: []runtime.Object{ + cmWithValidData, + }, + expectResult: &Configs{ + DataPathConcurrency: &DataPathConcurrency{ + GlobalConfig: 5, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeKubeClient := fake.NewSimpleClientset(test.kubeClientObj...) 
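+ // GetConfigs treats a missing "node-agent-configs" ConfigMap as "no configs"
+ // (nil, nil), but a nil Data map or malformed JSON as an error. The value it
+ // expects is a single JSON document matching the Configs struct, for example
+ // (illustrative only; the node selector shown is not taken from these tests):
+ //   {"dataPathConcurrency": {"globalConfig": 5, "perNodeConfig": [
+ //     {"nodeSelector": {"matchLabels": {"kubernetes.io/hostname": "node-1"}}, "number": 3}]}}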
+ + for _, reactor := range test.kubeReactors { + fakeKubeClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc) + } + + result, err := GetConfigs(context.TODO(), test.namespace, fakeKubeClient) + if test.expectErr == "" { + assert.NoError(t, err) + + if test.expectResult == nil { + assert.Nil(t, result) + } else if test.expectResult.DataPathConcurrency == nil { + assert.Nil(t, result.DataPathConcurrency) + } else { + assert.Equal(t, *test.expectResult.DataPathConcurrency, *result.DataPathConcurrency) + } + } else { + assert.EqualError(t, err, test.expectErr) + } + }) + } +} diff --git a/pkg/persistence/object_store.go b/pkg/persistence/object_store.go index d0fd9c1f1..26bddc565 100644 --- a/pkg/persistence/object_store.go +++ b/pkg/persistence/object_store.go @@ -27,13 +27,14 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/runtime/serializer" kerrors "k8s.io/apimachinery/pkg/util/errors" "github.com/vmware-tanzu/velero/internal/credentials" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" "github.com/vmware-tanzu/velero/pkg/itemoperation" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + "github.com/vmware-tanzu/velero/pkg/util" "github.com/vmware-tanzu/velero/pkg/volume" ) @@ -49,7 +50,8 @@ type BackupInfo struct { BackupResourceList, CSIVolumeSnapshots, CSIVolumeSnapshotContents, - CSIVolumeSnapshotClasses io.Reader + CSIVolumeSnapshotClasses, + BackupVolumeInfo io.Reader } // BackupStore defines operations for creating, retrieving, and deleting @@ -269,6 +271,7 @@ func (s *objectBackupStore) PutBackup(info BackupInfo) error { s.layout.getCSIVolumeSnapshotContentsKey(info.Name): info.CSIVolumeSnapshotContents, s.layout.getCSIVolumeSnapshotClassesKey(info.Name): info.CSIVolumeSnapshotClasses, s.layout.getBackupResultsKey(info.Name): info.BackupResults, + s.layout.getBackupVolumeInfoKey(info.Name): info.BackupVolumeInfo, } for key, reader := range backupObjs { @@ -302,7 +305,9 @@ func (s *objectBackupStore) GetBackupMetadata(name string) (*velerov1api.Backup, return nil, errors.WithStack(err) } - decoder := scheme.Codecs.UniversalDecoder(velerov1api.SchemeGroupVersion) + codecFactory := serializer.NewCodecFactory(util.VeleroScheme) + + decoder := codecFactory.UniversalDecoder(velerov1api.SchemeGroupVersion) obj, _, err := decoder.Decode(data, nil, nil) if err != nil { return nil, errors.WithStack(err) @@ -488,6 +493,25 @@ func (s *objectBackupStore) GetPodVolumeBackups(name string) ([]*velerov1api.Pod return podVolumeBackups, nil } +func (s *objectBackupStore) GetBackupVolumeInfos(name string) (*volume.VolumeInfos, error) { + var volumeInfos *volume.VolumeInfos + + res, err := tryGet(s.objectStore, s.bucket, s.layout.getBackupVolumeInfoKey(name)) + if err != nil { + return volumeInfos, err + } + if res == nil { + return volumeInfos, nil + } + defer res.Close() + + if err := decode(res, &volumeInfos); err != nil { + return volumeInfos, err + } + + return volumeInfos, nil +} + func (s *objectBackupStore) GetBackupContents(name string) (io.ReadCloser, error) { return s.objectStore.GetObject(s.bucket, s.layout.getBackupContentsKey(name)) } diff --git a/pkg/persistence/object_store_layout.go b/pkg/persistence/object_store_layout.go index 134562337..fff920d0a 100644 --- a/pkg/persistence/object_store_layout.go +++ b/pkg/persistence/object_store_layout.go @@ -128,3 +128,7 @@ func (l *ObjectStoreLayout) 
getCSIVolumeSnapshotClassesKey(backup string) string func (l *ObjectStoreLayout) getBackupResultsKey(backup string) string { return path.Join(l.subdirs["backups"], backup, fmt.Sprintf("%s-results.gz", backup)) } + +func (l *ObjectStoreLayout) getBackupVolumeInfoKey(backup string) string { + return path.Join(l.subdirs["backups"], backup, fmt.Sprintf("%s-volumeinfos.json.gz", backup)) +} diff --git a/pkg/persistence/object_store_test.go b/pkg/persistence/object_store_test.go index de632d924..6c149f188 100644 --- a/pkg/persistence/object_store_test.go +++ b/pkg/persistence/object_store_test.go @@ -227,6 +227,7 @@ func TestPutBackup(t *testing.T) { snapshots io.Reader backupItemOperations io.Reader resourceList io.Reader + backupVolumeInfo io.Reader expectedErr string expectedKeys []string }{ @@ -239,6 +240,7 @@ func TestPutBackup(t *testing.T) { snapshots: newStringReadSeeker("snapshots"), backupItemOperations: newStringReadSeeker("backupItemOperations"), resourceList: newStringReadSeeker("resourceList"), + backupVolumeInfo: newStringReadSeeker("backupVolumeInfo"), expectedErr: "", expectedKeys: []string{ "backups/backup-1/velero-backup.json", @@ -248,6 +250,7 @@ func TestPutBackup(t *testing.T) { "backups/backup-1/backup-1-volumesnapshots.json.gz", "backups/backup-1/backup-1-itemoperations.json.gz", "backups/backup-1/backup-1-resource-list.json.gz", + "backups/backup-1/backup-1-volumeinfos.json.gz", }, }, { @@ -260,6 +263,7 @@ func TestPutBackup(t *testing.T) { snapshots: newStringReadSeeker("snapshots"), backupItemOperations: newStringReadSeeker("backupItemOperations"), resourceList: newStringReadSeeker("resourceList"), + backupVolumeInfo: newStringReadSeeker("backupVolumeInfo"), expectedErr: "", expectedKeys: []string{ "prefix-1/backups/backup-1/velero-backup.json", @@ -269,6 +273,7 @@ func TestPutBackup(t *testing.T) { "prefix-1/backups/backup-1/backup-1-volumesnapshots.json.gz", "prefix-1/backups/backup-1/backup-1-itemoperations.json.gz", "prefix-1/backups/backup-1/backup-1-resource-list.json.gz", + "prefix-1/backups/backup-1/backup-1-volumeinfos.json.gz", }, }, { @@ -280,6 +285,7 @@ func TestPutBackup(t *testing.T) { snapshots: newStringReadSeeker("snapshots"), backupItemOperations: newStringReadSeeker("backupItemOperations"), resourceList: newStringReadSeeker("resourceList"), + backupVolumeInfo: newStringReadSeeker("backupVolumeInfo"), expectedErr: "error readers return errors", expectedKeys: []string{"backups/backup-1/backup-1-logs.gz"}, }, @@ -291,6 +297,7 @@ func TestPutBackup(t *testing.T) { snapshots: newStringReadSeeker("snapshots"), backupItemOperations: newStringReadSeeker("backupItemOperations"), resourceList: newStringReadSeeker("resourceList"), + backupVolumeInfo: newStringReadSeeker("backupVolumeInfo"), expectedErr: "error readers return errors", expectedKeys: []string{"backups/backup-1/backup-1-logs.gz"}, }, @@ -303,6 +310,7 @@ func TestPutBackup(t *testing.T) { snapshots: newStringReadSeeker("snapshots"), backupItemOperations: newStringReadSeeker("backupItemOperations"), resourceList: newStringReadSeeker("resourceList"), + backupVolumeInfo: newStringReadSeeker("backupVolumeInfo"), expectedErr: "", expectedKeys: []string{ "backups/backup-1/velero-backup.json", @@ -311,23 +319,26 @@ func TestPutBackup(t *testing.T) { "backups/backup-1/backup-1-volumesnapshots.json.gz", "backups/backup-1/backup-1-itemoperations.json.gz", "backups/backup-1/backup-1-resource-list.json.gz", + "backups/backup-1/backup-1-volumeinfos.json.gz", }, }, { - name: "data should be uploaded even when 
metadata is nil", - metadata: nil, - contents: newStringReadSeeker("contents"), - log: newStringReadSeeker("log"), - podVolumeBackup: newStringReadSeeker("podVolumeBackup"), - snapshots: newStringReadSeeker("snapshots"), - resourceList: newStringReadSeeker("resourceList"), - expectedErr: "", + name: "data should be uploaded even when metadata is nil", + metadata: nil, + contents: newStringReadSeeker("contents"), + log: newStringReadSeeker("log"), + podVolumeBackup: newStringReadSeeker("podVolumeBackup"), + snapshots: newStringReadSeeker("snapshots"), + resourceList: newStringReadSeeker("resourceList"), + backupVolumeInfo: newStringReadSeeker("backupVolumeInfo"), + expectedErr: "", expectedKeys: []string{ "backups/backup-1/backup-1.tar.gz", "backups/backup-1/backup-1-logs.gz", "backups/backup-1/backup-1-podvolumebackups.json.gz", "backups/backup-1/backup-1-volumesnapshots.json.gz", "backups/backup-1/backup-1-resource-list.json.gz", + "backups/backup-1/backup-1-volumeinfos.json.gz", }, }, } @@ -345,6 +356,7 @@ func TestPutBackup(t *testing.T) { VolumeSnapshots: tc.snapshots, BackupItemOperations: tc.backupItemOperations, BackupResourceList: tc.resourceList, + BackupVolumeInfo: tc.backupVolumeInfo, } err := harness.PutBackup(backupInfo) @@ -1045,6 +1057,90 @@ func TestNewObjectBackupStoreGetterConfig(t *testing.T) { } } +func TestGetBackupVolumeInfos(t *testing.T) { + tests := []struct { + name string + volumeInfo *volume.VolumeInfos + volumeInfoStr string + expectedErr string + expectedResult []volume.VolumeInfo + }{ + { + name: "No VolumeInfos, expect no error.", + }, + { + name: "Valid VolumeInfo, should pass.", + volumeInfo: &volume.VolumeInfos{ + VolumeInfos: []volume.VolumeInfo{ + { + PVCName: "pvcName", + PVName: "pvName", + Skipped: true, + SnapshotDataMoved: false, + }, + }, + }, + expectedResult: []volume.VolumeInfo{ + { + PVCName: "pvcName", + PVName: "pvName", + Skipped: true, + SnapshotDataMoved: false, + }, + }, + }, + { + name: "Invalid VolumeInfo string, should also pass.", + volumeInfoStr: `{"volumeInfos": [{"abc": "123", "def": "456", "pvcName": "pvcName"}]}`, + expectedResult: []volume.VolumeInfo{ + { + PVCName: "pvcName", + }, + }, + }, + } + + harness := newObjectBackupStoreTestHarness("test-bucket", "") + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + if tc.volumeInfo != nil { + obj := new(bytes.Buffer) + gzw := gzip.NewWriter(obj) + + require.NoError(t, json.NewEncoder(gzw).Encode(tc.volumeInfo)) + require.NoError(t, gzw.Close()) + harness.objectStore.PutObject(harness.bucket, "backups/test-backup/test-backup-volumeinfos.json.gz", obj) + } + + if tc.volumeInfoStr != "" { + obj := new(bytes.Buffer) + gzw := gzip.NewWriter(obj) + _, err := gzw.Write([]byte(tc.volumeInfoStr)) + require.NoError(t, err) + + require.NoError(t, gzw.Close()) + harness.objectStore.PutObject(harness.bucket, "backups/test-backup/test-backup-volumeinfos.json.gz", obj) + } + + result, err := harness.GetBackupVolumeInfos("test-backup") + if tc.expectedErr != "" { + require.Equal(t, tc.expectedErr, err.Error()) + } else { + if err != nil { + fmt.Println(err.Error()) + } + require.NoError(t, err) + } + + if len(tc.expectedResult) > 0 { + require.Equal(t, tc.expectedResult, result.VolumeInfos) + } + + }) + } +} + func encodeToBytes(obj runtime.Object) []byte { res, err := encode.Encode(obj, "json") if err != nil { diff --git a/pkg/plugin/clientmgmt/manager_test.go b/pkg/plugin/clientmgmt/manager_test.go index 1e8c14926..3a1c39529 100644 --- a/pkg/plugin/clientmgmt/manager_test.go 
+++ b/pkg/plugin/clientmgmt/manager_test.go @@ -61,6 +61,10 @@ func (r *mockRegistry) Get(kind common.PluginKind, name string) (framework.Plugi return id, args.Error(1) } +func (r *mockRegistry) Find(kind common.PluginKind, name string) bool { + return false +} + func TestNewManager(t *testing.T) { logger := test.NewLogger() logLevel := logrus.InfoLevel diff --git a/pkg/plugin/clientmgmt/process/registry.go b/pkg/plugin/clientmgmt/process/registry.go index 6238c45fb..7845f79ef 100644 --- a/pkg/plugin/clientmgmt/process/registry.go +++ b/pkg/plugin/clientmgmt/process/registry.go @@ -37,6 +37,9 @@ type Registry interface { List(kind common.PluginKind) []framework.PluginIdentifier // Get returns the PluginIdentifier for kind and name. Get(kind common.PluginKind, name string) (framework.PluginIdentifier, error) + + // Find checks if the specified plugin exists in the registry + Find(kind common.PluginKind, name string) bool } // KindAndName is a convenience struct that combines a PluginKind and a name. @@ -125,6 +128,12 @@ func (r *registry) Get(kind common.PluginKind, name string) (framework.PluginIde return p, nil } +// Find checks if the specified plugin exists in the registry +func (r *registry) Find(kind common.PluginKind, name string) bool { + _, found := r.pluginsByID[KindAndName{Kind: kind, Name: name}] + return found +} + // readPluginsDir recursively reads dir looking for plugins. func (r *registry) readPluginsDir(dir string) ([]string, error) { if _, err := r.fs.Stat(dir); err != nil { diff --git a/pkg/plugin/framework/common/plugin_kinds.go b/pkg/plugin/framework/common/plugin_kinds.go index 5cb06b930..83cbcef31 100644 --- a/pkg/plugin/framework/common/plugin_kinds.go +++ b/pkg/plugin/framework/common/plugin_kinds.go @@ -41,7 +41,7 @@ const ( // PluginKindRestoreItemAction represents a restore item action plugin. PluginKindRestoreItemAction PluginKind = "RestoreItemAction" - // PluginKindRestoreItemAction represents a v2 restore item action plugin. + // PluginKindRestoreItemActionV2 represents a v2 restore item action plugin. PluginKindRestoreItemActionV2 PluginKind = "RestoreItemActionV2" // PluginKindDeleteItemAction represents a delete item action plugin.
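Note: the new Registry.Find above is an existence check over the same pluginsByID map that Get consults, returning a bool instead of an error when the plugin is absent. A pared-down sketch of that lookup (PluginKind, KindAndName and the map value type are trimmed stand-ins for the real framework types, and the plugin names in main are illustrative):

package main

import "fmt"

type PluginKind string

// KindAndName mirrors the composite key the registry indexes by.
type KindAndName struct {
	Kind PluginKind
	Name string
}

type registry struct {
	pluginsByID map[KindAndName]struct{} // the real map stores framework.PluginIdentifier values
}

// Find reports whether a plugin with the given kind and name is registered.
func (r *registry) Find(kind PluginKind, name string) bool {
	_, found := r.pluginsByID[KindAndName{Kind: kind, Name: name}]
	return found
}

func main() {
	r := &registry{pluginsByID: map[KindAndName]struct{}{
		{Kind: "RestoreItemActionV2", Name: "velero.io/example"}: {},
	}}
	fmt.Println(r.Find("RestoreItemActionV2", "velero.io/example")) // true
	fmt.Println(r.Find("BackupItemAction", "velero.io/other"))      // false
}

This lets callers ask whether a plugin is registered at all without having to treat Get's not-found error as an exceptional path.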
diff --git a/pkg/podvolume/backupper.go b/pkg/podvolume/backupper.go index 951084969..3239f10f2 100644 --- a/pkg/podvolume/backupper.go +++ b/pkg/podvolume/backupper.go @@ -26,13 +26,13 @@ import ( corev1api "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" + ctrlcache "sigs.k8s.io/controller-runtime/pkg/cache" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/vmware-tanzu/velero/internal/resourcepolicies" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" veleroclient "github.com/vmware-tanzu/velero/pkg/client" - clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" "github.com/vmware-tanzu/velero/pkg/label" "github.com/vmware-tanzu/velero/pkg/nodeagent" "github.com/vmware-tanzu/velero/pkg/repository" @@ -50,10 +50,7 @@ type backupper struct { ctx context.Context repoLocker *repository.RepoLocker repoEnsurer *repository.Ensurer - veleroClient clientset.Interface - pvcClient corev1client.PersistentVolumeClaimsGetter - pvClient corev1client.PersistentVolumesGetter - podClient corev1client.PodsGetter + crClient ctrlclient.Client uploaderType string results map[string]chan *velerov1api.PodVolumeBackup @@ -103,32 +100,31 @@ func newBackupper( ctx context.Context, repoLocker *repository.RepoLocker, repoEnsurer *repository.Ensurer, - podVolumeBackupInformer cache.SharedIndexInformer, - veleroClient clientset.Interface, - pvcClient corev1client.PersistentVolumeClaimsGetter, - pvClient corev1client.PersistentVolumesGetter, - podClient corev1client.PodsGetter, + pvbInformer ctrlcache.Informer, + crClient ctrlclient.Client, uploaderType string, + backup *velerov1api.Backup, log logrus.FieldLogger, ) *backupper { b := &backupper{ ctx: ctx, repoLocker: repoLocker, repoEnsurer: repoEnsurer, - veleroClient: veleroClient, - pvcClient: pvcClient, - pvClient: pvClient, - podClient: podClient, + crClient: crClient, uploaderType: uploaderType, results: make(map[string]chan *velerov1api.PodVolumeBackup), } - podVolumeBackupInformer.AddEventHandler( + pvbInformer.AddEventHandler( cache.ResourceEventHandlerFuncs{ UpdateFunc: func(_, obj interface{}) { pvb := obj.(*velerov1api.PodVolumeBackup) + if pvb.GetLabels()[velerov1api.BackupUIDLabel] != string(backup.UID) { + return + } + if pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseCompleted || pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed { b.resultsLock.Lock() defer b.resultsLock.Unlock() @@ -153,7 +149,8 @@ func resultsKey(ns, name string) string { func (b *backupper) getMatchAction(resPolicies *resourcepolicies.Policies, pvc *corev1api.PersistentVolumeClaim, volume *corev1api.Volume) (*resourcepolicies.Action, error) { if pvc != nil { - pv, err := b.pvClient.PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) + pv := new(corev1api.PersistentVolume) + err := b.crClient.Get(context.TODO(), ctrlclient.ObjectKey{Name: pvc.Spec.VolumeName}, pv) if err != nil { return nil, errors.Wrapf(err, "error getting pv for pvc %s", pvc.Spec.VolumeName) } @@ -173,7 +170,7 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. 
} log.Infof("pod %s/%s has volumes to backup: %v", pod.Namespace, pod.Name, volumesToBackup) - err := nodeagent.IsRunningInNode(b.ctx, backup.Namespace, pod.Spec.NodeName, b.podClient) + err := nodeagent.IsRunningInNode(b.ctx, backup.Namespace, pod.Spec.NodeName, b.crClient) if err != nil { return nil, nil, []error{err} } @@ -213,7 +210,8 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. for _, podVolume := range pod.Spec.Volumes { podVolumes[podVolume.Name] = podVolume if podVolume.PersistentVolumeClaim != nil { - pvc, err := b.pvcClient.PersistentVolumeClaims(pod.Namespace).Get(context.TODO(), podVolume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) + pvc := new(corev1api.PersistentVolumeClaim) + err := b.crClient.Get(context.TODO(), ctrlclient.ObjectKey{Namespace: pod.Namespace, Name: podVolume.PersistentVolumeClaim.ClaimName}, pvc) if err != nil { errs = append(errs, errors.Wrap(err, "error getting persistent volume claim for volume")) continue @@ -263,7 +261,7 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. // hostPath volumes are not supported because they're not mounted into /var/lib/kubelet/pods, so our // daemonset pod has no way to access their data. - isHostPath, err := isHostPathVolume(&volume, pvc, b.pvClient.PersistentVolumes()) + isHostPath, err := isHostPathVolume(&volume, pvc, b.crClient) if err != nil { errs = append(errs, errors.Wrap(err, "error checking if volume is a hostPath volume")) continue @@ -303,11 +301,7 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. } volumeBackup := newPodVolumeBackup(backup, pod, volume, repoIdentifier, b.uploaderType, pvc) - // TODO: once backupper is refactored to use controller-runtime, just pass client instead of anonymous func - if err := veleroclient.CreateRetryGenerateNameWithFunc(volumeBackup, func() error { - _, err := b.veleroClient.VeleroV1().PodVolumeBackups(volumeBackup.Namespace).Create(context.TODO(), volumeBackup, metav1.CreateOptions{}) - return err - }); err != nil { + if err := veleroclient.CreateRetryGenerateName(b.crClient, b.ctx, volumeBackup); err != nil { errs = append(errs, err) continue } @@ -339,13 +333,9 @@ ForEachVolume: return podVolumeBackups, pvcSummary, errs } -type pvGetter interface { - Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1api.PersistentVolume, error) -} - // isHostPathVolume returns true if the volume is either a hostPath pod volume or a persistent // volume claim on a hostPath persistent volume, or false otherwise. 
-func isHostPathVolume(volume *corev1api.Volume, pvc *corev1api.PersistentVolumeClaim, pvGetter pvGetter) (bool, error) { +func isHostPathVolume(volume *corev1api.Volume, pvc *corev1api.PersistentVolumeClaim, crClient ctrlclient.Client) (bool, error) { if volume.HostPath != nil { return true, nil } @@ -354,7 +344,8 @@ func isHostPathVolume(volume *corev1api.Volume, pvc *corev1api.PersistentVolumeC return false, nil } - pv, err := pvGetter.Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) + pv := new(corev1api.PersistentVolume) + err := crClient.Get(context.TODO(), ctrlclient.ObjectKey{Name: pvc.Spec.VolumeName}, pv) if err != nil { return false, errors.WithStack(err) } @@ -422,7 +413,3 @@ func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volume c return pvb } - -func errorOnly(_ interface{}, err error) error { - return err -} diff --git a/pkg/podvolume/backupper_factory.go b/pkg/podvolume/backupper_factory.go index 568bcb641..84020e664 100644 --- a/pkg/podvolume/backupper_factory.go +++ b/pkg/podvolume/backupper_factory.go @@ -18,17 +18,14 @@ package podvolume import ( "context" - "fmt" "github.com/pkg/errors" "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" + ctrlcache "sigs.k8s.io/controller-runtime/pkg/cache" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" - velerov1informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1" "github.com/vmware-tanzu/velero/pkg/repository" ) @@ -41,48 +38,31 @@ type BackupperFactory interface { func NewBackupperFactory( repoLocker *repository.RepoLocker, repoEnsurer *repository.Ensurer, - veleroClient clientset.Interface, - pvcClient corev1client.PersistentVolumeClaimsGetter, - pvClient corev1client.PersistentVolumesGetter, - podClient corev1client.PodsGetter, + crClient ctrlclient.Client, + pvbInformer ctrlcache.Informer, log logrus.FieldLogger, ) BackupperFactory { return &backupperFactory{ - repoLocker: repoLocker, - repoEnsurer: repoEnsurer, - veleroClient: veleroClient, - pvcClient: pvcClient, - pvClient: pvClient, - podClient: podClient, - log: log, + repoLocker: repoLocker, + repoEnsurer: repoEnsurer, + crClient: crClient, + pvbInformer: pvbInformer, + log: log, } } type backupperFactory struct { - repoLocker *repository.RepoLocker - repoEnsurer *repository.Ensurer - veleroClient clientset.Interface - pvcClient corev1client.PersistentVolumeClaimsGetter - pvClient corev1client.PersistentVolumesGetter - podClient corev1client.PodsGetter - log logrus.FieldLogger + repoLocker *repository.RepoLocker + repoEnsurer *repository.Ensurer + crClient ctrlclient.Client + pvbInformer ctrlcache.Informer + log logrus.FieldLogger } func (bf *backupperFactory) NewBackupper(ctx context.Context, backup *velerov1api.Backup, uploaderType string) (Backupper, error) { - informer := velerov1informers.NewFilteredPodVolumeBackupInformer( - bf.veleroClient, - backup.Namespace, - 0, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - func(opts *metav1.ListOptions) { - opts.LabelSelector = fmt.Sprintf("%s=%s", velerov1api.BackupUIDLabel, backup.UID) - }, - ) + b := newBackupper(ctx, bf.repoLocker, bf.repoEnsurer, bf.pvbInformer, bf.crClient, uploaderType, backup, bf.log) - b := newBackupper(ctx, bf.repoLocker, bf.repoEnsurer, 
informer, bf.veleroClient, bf.pvcClient, bf.pvClient, bf.podClient, uploaderType, bf.log) - - go informer.Run(ctx.Done()) - if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) { + if !cache.WaitForCacheSync(ctx.Done(), bf.pvbInformer.HasSynced) { return nil, errors.New("timed out waiting for caches to sync") } diff --git a/pkg/podvolume/backupper_test.go b/pkg/podvolume/backupper_test.go index 06520f2f2..e08b772dd 100644 --- a/pkg/podvolume/backupper_test.go +++ b/pkg/podvolume/backupper_test.go @@ -23,7 +23,6 @@ import ( "testing" "time" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -32,17 +31,15 @@ import ( "k8s.io/apimachinery/pkg/runtime" ctrlfake "sigs.k8s.io/controller-runtime/pkg/client/fake" - "k8s.io/client-go/kubernetes" - kubefake "k8s.io/client-go/kubernetes/fake" clientTesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" "github.com/vmware-tanzu/velero/internal/resourcepolicies" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" - "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" - velerofake "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/fake" "github.com/vmware-tanzu/velero/pkg/repository" velerotest "github.com/vmware-tanzu/velero/pkg/test" + "github.com/vmware-tanzu/velero/pkg/util/kube" ) func TestIsHostPathVolume(t *testing.T) { @@ -101,15 +98,14 @@ func TestIsHostPathVolume(t *testing.T) { VolumeName: "pv-1", }, } - pvGetter := &fakePVGetter{ - pv: &corev1api.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pv-1", - }, - Spec: corev1api.PersistentVolumeSpec{}, + pv := &corev1api.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv-1", }, + Spec: corev1api.PersistentVolumeSpec{}, } - isHostPath, err = isHostPathVolume(vol, pvc, pvGetter) + crClient1 := velerotest.NewFakeControllerRuntimeClient(t, pv) + isHostPath, err = isHostPathVolume(vol, pvc, crClient1) assert.Nil(t, err) assert.False(t, isHostPath) @@ -130,35 +126,23 @@ func TestIsHostPathVolume(t *testing.T) { VolumeName: "pv-1", }, } - pvGetter = &fakePVGetter{ - pv: &corev1api.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pv-1", - }, - Spec: corev1api.PersistentVolumeSpec{ - PersistentVolumeSource: corev1api.PersistentVolumeSource{ - HostPath: &corev1api.HostPathVolumeSource{}, - }, + pv = &corev1api.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pv-1", + }, + Spec: corev1api.PersistentVolumeSpec{ + PersistentVolumeSource: corev1api.PersistentVolumeSource{ + HostPath: &corev1api.HostPathVolumeSource{}, }, }, } - isHostPath, err = isHostPathVolume(vol, pvc, pvGetter) + crClient2 := velerotest.NewFakeControllerRuntimeClient(t, pv) + + isHostPath, err = isHostPathVolume(vol, pvc, crClient2) assert.Nil(t, err) assert.True(t, isHostPath) } -type fakePVGetter struct { - pv *corev1api.PersistentVolume -} - -func (g *fakePVGetter) Get(ctx context.Context, name string, opts metav1.GetOptions) (*corev1api.PersistentVolume, error) { - if g.pv != nil { - return g.pv, nil - } - - return nil, errors.New("item not found") -} - func Test_backupper_BackupPodVolumes_log_test(t *testing.T) { type args struct { backup *velerov1api.Backup @@ -322,6 +306,7 @@ func createPVBObj(fail bool, withSnapshot bool, index int, uploaderType string) func TestBackupPodVolumes(t *testing.T) { scheme := runtime.NewScheme() velerov1api.AddToScheme(scheme) + corev1api.AddToScheme(scheme) ctxWithCancel, 
cancel := context.WithCancel(context.Background()) defer cancel() @@ -509,40 +494,6 @@ func TestBackupPodVolumes(t *testing.T) { uploaderType: "kopia", bsl: "fake-bsl", }, - { - name: "create PVB fail", - volumes: []string{ - "fake-volume-1", - "fake-volume-2", - }, - sourcePod: createPodObj(true, true, true, 2), - kubeClientObj: []runtime.Object{ - createNodeAgentPodObj(true), - createPVCObj(1), - createPVCObj(2), - createPVObj(1, false), - createPVObj(2, false), - }, - ctlClientObj: []runtime.Object{ - createBackupRepoObj(), - }, - veleroReactors: []reactor{ - { - verb: "create", - resource: "podvolumebackups", - reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, errors.New("fake-create-error") - }, - }, - }, - runtimeScheme: scheme, - uploaderType: "kopia", - bsl: "fake-bsl", - errs: []string{ - "fake-create-error", - "fake-create-error", - }, - }, { name: "context cancelled", ctx: ctxWithCancel, @@ -630,23 +581,28 @@ func TestBackupPodVolumes(t *testing.T) { fakeClientBuilder = fakeClientBuilder.WithScheme(test.runtimeScheme) } - fakeCtlClient := fakeClientBuilder.WithRuntimeObjects(test.ctlClientObj...).Build() + objList := append(test.ctlClientObj, test.veleroClientObj...) + objList = append(objList, test.kubeClientObj...) + fakeCtrlClient := fakeClientBuilder.WithRuntimeObjects(objList...).Build() - fakeKubeClient := kubefake.NewSimpleClientset(test.kubeClientObj...) - var kubeClient kubernetes.Interface = fakeKubeClient - - fakeVeleroClient := velerofake.NewSimpleClientset(test.veleroClientObj...) - for _, reactor := range test.veleroReactors { - fakeVeleroClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc) + fakeCRWatchClient := velerotest.NewFakeControllerRuntimeWatchClient(t, test.kubeClientObj...) 
+ lw := kube.InternalLW{ + Client: fakeCRWatchClient, + Namespace: velerov1api.DefaultNamespace, + ObjectList: new(velerov1api.PodVolumeBackupList), } - var veleroClient versioned.Interface = fakeVeleroClient - ensurer := repository.NewEnsurer(fakeCtlClient, velerotest.NewLogger(), time.Millisecond) + pvbInformer := cache.NewSharedIndexInformer(&lw, &velerov1api.PodVolumeBackup{}, 0, cache.Indexers{}) + + go pvbInformer.Run(ctx.Done()) + require.True(t, cache.WaitForCacheSync(ctx.Done(), pvbInformer.HasSynced)) + + ensurer := repository.NewEnsurer(fakeCtrlClient, velerotest.NewLogger(), time.Millisecond) backupObj := builder.ForBackup(velerov1api.DefaultNamespace, "fake-backup").Result() backupObj.Spec.StorageLocation = test.bsl - factory := NewBackupperFactory(repository.NewRepoLocker(), ensurer, veleroClient, kubeClient.CoreV1(), kubeClient.CoreV1(), kubeClient.CoreV1(), velerotest.NewLogger()) + factory := NewBackupperFactory(repository.NewRepoLocker(), ensurer, fakeCtrlClient, pvbInformer, velerotest.NewLogger()) bp, err := factory.NewBackupper(ctx, backupObj, test.uploaderType) require.NoError(t, err) diff --git a/pkg/podvolume/restorer.go b/pkg/podvolume/restorer.go index a55e100fc..d54a7e66c 100644 --- a/pkg/podvolume/restorer.go +++ b/pkg/podvolume/restorer.go @@ -27,12 +27,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" + ctrlcache "sigs.k8s.io/controller-runtime/pkg/cache" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" veleroclient "github.com/vmware-tanzu/velero/pkg/client" - clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" "github.com/vmware-tanzu/velero/pkg/label" "github.com/vmware-tanzu/velero/pkg/nodeagent" "github.com/vmware-tanzu/velero/pkg/repository" @@ -54,13 +54,11 @@ type Restorer interface { } type restorer struct { - ctx context.Context - repoLocker *repository.RepoLocker - repoEnsurer *repository.Ensurer - veleroClient clientset.Interface - pvcClient corev1client.PersistentVolumeClaimsGetter - podClient corev1client.PodsGetter - kubeClient kubernetes.Interface + ctx context.Context + repoLocker *repository.RepoLocker + repoEnsurer *repository.Ensurer + kubeClient kubernetes.Interface + crClient ctrlclient.Client resultsLock sync.Mutex results map[string]chan *velerov1api.PodVolumeRestore @@ -72,30 +70,30 @@ func newRestorer( ctx context.Context, repoLocker *repository.RepoLocker, repoEnsurer *repository.Ensurer, - podVolumeRestoreInformer cache.SharedIndexInformer, - veleroClient clientset.Interface, - pvcClient corev1client.PersistentVolumeClaimsGetter, - podClient corev1client.PodsGetter, + pvrInformer ctrlcache.Informer, kubeClient kubernetes.Interface, + crClient ctrlclient.Client, + restore *velerov1api.Restore, log logrus.FieldLogger, ) *restorer { r := &restorer{ - ctx: ctx, - repoLocker: repoLocker, - repoEnsurer: repoEnsurer, - veleroClient: veleroClient, - pvcClient: pvcClient, - podClient: podClient, - kubeClient: kubeClient, + ctx: ctx, + repoLocker: repoLocker, + repoEnsurer: repoEnsurer, + kubeClient: kubeClient, + crClient: crClient, results: make(map[string]chan *velerov1api.PodVolumeRestore), log: log, } - podVolumeRestoreInformer.AddEventHandler( + pvrInformer.AddEventHandler( cache.ResourceEventHandlerFuncs{ UpdateFunc: func(_, obj interface{}) { pvr := 
obj.(*velerov1api.PodVolumeRestore) + if pvr.GetLabels()[velerov1api.RestoreUIDLabel] != string(restore.UID) { + return + } if pvr.Status.Phase == velerov1api.PodVolumeRestorePhaseCompleted || pvr.Status.Phase == velerov1api.PodVolumeRestorePhaseFailed { r.resultsLock.Lock() @@ -169,7 +167,8 @@ func (r *restorer) RestorePodVolumes(data RestoreData) []error { var pvc *corev1api.PersistentVolumeClaim if ok { if volumeObj.PersistentVolumeClaim != nil { - pvc, err = r.pvcClient.PersistentVolumeClaims(data.Pod.Namespace).Get(context.TODO(), volumeObj.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) + pvc = new(corev1api.PersistentVolumeClaim) + err := r.crClient.Get(context.TODO(), ctrlclient.ObjectKey{Namespace: data.Pod.Namespace, Name: volumeObj.PersistentVolumeClaim.ClaimName}, pvc) if err != nil { errs = append(errs, errors.Wrap(err, "error getting persistent volume claim for volume")) continue @@ -179,10 +178,7 @@ func (r *restorer) RestorePodVolumes(data RestoreData) []error { volumeRestore := newPodVolumeRestore(data.Restore, data.Pod, data.BackupLocation, volume, backupInfo.snapshotID, repoIdentifier, backupInfo.uploaderType, data.SourceNamespace, pvc) - // TODO: once restorer is refactored to use controller-runtime, just pass client instead of anonymous func - if err := veleroclient.CreateRetryGenerateNameWithFunc(volumeRestore, func() error { - return errorOnly(r.veleroClient.VeleroV1().PodVolumeRestores(volumeRestore.Namespace).Create(context.TODO(), volumeRestore, metav1.CreateOptions{})) - }); err != nil { + if err := veleroclient.CreateRetryGenerateName(r.crClient, r.ctx, volumeRestore); err != nil { errs = append(errs, errors.WithStack(err)) continue } @@ -214,7 +210,7 @@ func (r *restorer) RestorePodVolumes(data RestoreData) []error { } else if err != nil { r.log.WithError(err).Error("Failed to check node-agent pod status, disengage") } else { - err = nodeagent.IsRunningInNode(checkCtx, data.Restore.Namespace, nodeName, r.podClient) + err = nodeagent.IsRunningInNode(checkCtx, data.Restore.Namespace, nodeName, r.crClient) if err != nil { r.log.WithField("node", nodeName).WithError(err).Error("node-agent pod is not running in node, abort the restore") r.nodeAgentCheck <- errors.Wrapf(err, "node-agent pod is not running in node %s", nodeName) diff --git a/pkg/podvolume/restorer_factory.go b/pkg/podvolume/restorer_factory.go index 50baf3567..178d720c8 100644 --- a/pkg/podvolume/restorer_factory.go +++ b/pkg/podvolume/restorer_factory.go @@ -18,18 +18,15 @@ package podvolume import ( "context" - "fmt" "github.com/pkg/errors" "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" + ctrlcache "sigs.k8s.io/controller-runtime/pkg/cache" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" - velerov1informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1" "github.com/vmware-tanzu/velero/pkg/repository" ) @@ -41,47 +38,33 @@ type RestorerFactory interface { func NewRestorerFactory(repoLocker *repository.RepoLocker, repoEnsurer *repository.Ensurer, - veleroClient clientset.Interface, - pvcClient corev1client.PersistentVolumeClaimsGetter, - podClient corev1client.PodsGetter, kubeClient kubernetes.Interface, + crClient ctrlclient.Client, + pvrInformer
ctrlcache.Informer, log logrus.FieldLogger) RestorerFactory { return &restorerFactory{ - repoLocker: repoLocker, - repoEnsurer: repoEnsurer, - veleroClient: veleroClient, - pvcClient: pvcClient, - podClient: podClient, - kubeClient: kubeClient, - log: log, + repoLocker: repoLocker, + repoEnsurer: repoEnsurer, + kubeClient: kubeClient, + crClient: crClient, + pvrInformer: pvrInformer, + log: log, } } type restorerFactory struct { - repoLocker *repository.RepoLocker - repoEnsurer *repository.Ensurer - veleroClient clientset.Interface - pvcClient corev1client.PersistentVolumeClaimsGetter - podClient corev1client.PodsGetter - kubeClient kubernetes.Interface - log logrus.FieldLogger + repoLocker *repository.RepoLocker + repoEnsurer *repository.Ensurer + kubeClient kubernetes.Interface + crClient ctrlclient.Client + pvrInformer ctrlcache.Informer + log logrus.FieldLogger } func (rf *restorerFactory) NewRestorer(ctx context.Context, restore *velerov1api.Restore) (Restorer, error) { - informer := velerov1informers.NewFilteredPodVolumeRestoreInformer( - rf.veleroClient, - restore.Namespace, - 0, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - func(opts *metav1.ListOptions) { - opts.LabelSelector = fmt.Sprintf("%s=%s", velerov1api.RestoreUIDLabel, restore.UID) - }, - ) + r := newRestorer(ctx, rf.repoLocker, rf.repoEnsurer, rf.pvrInformer, rf.kubeClient, rf.crClient, restore, rf.log) - r := newRestorer(ctx, rf.repoLocker, rf.repoEnsurer, informer, rf.veleroClient, rf.pvcClient, rf.podClient, rf.kubeClient, rf.log) - - go informer.Run(ctx.Done()) - if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) { + if !cache.WaitForCacheSync(ctx.Done(), rf.pvrInformer.HasSynced) { return nil, errors.New("timed out waiting for cache to sync") } diff --git a/pkg/podvolume/restorer_test.go b/pkg/podvolume/restorer_test.go index 0202904d8..f630b0fd5 100644 --- a/pkg/podvolume/restorer_test.go +++ b/pkg/podvolume/restorer_test.go @@ -22,7 +22,6 @@ import ( "testing" "time" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" appv1 "k8s.io/api/apps/v1" @@ -31,15 +30,14 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes" kubefake "k8s.io/client-go/kubernetes/fake" - clientTesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" ctrlfake "sigs.k8s.io/controller-runtime/pkg/client/fake" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" - "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" - velerofake "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/fake" "github.com/vmware-tanzu/velero/pkg/repository" velerotest "github.com/vmware-tanzu/velero/pkg/test" + "github.com/vmware-tanzu/velero/pkg/util/kube" ) func TestGetVolumesRepositoryType(t *testing.T) { @@ -162,6 +160,7 @@ type expectError struct { func TestRestorePodVolumes(t *testing.T) { scheme := runtime.NewScheme() velerov1api.AddToScheme(scheme) + corev1api.AddToScheme(scheme) ctxWithCancel, cancel := context.WithCancel(context.Background()) defer cancel() @@ -264,42 +263,6 @@ func TestRestorePodVolumes(t *testing.T) { }, }, }, - { - name: "create pvb fail", - pvbs: []*velerov1api.PodVolumeBackup{ - createPVBObj(true, true, 1, "kopia"), - createPVBObj(true, true, 2, "kopia"), - }, - kubeClientObj: []runtime.Object{ - createNodeAgentDaemonset(), - createPVCObj(1), - createPVCObj(2), - }, - ctlClientObj: []runtime.Object{ - createBackupRepoObj(), - }, - 
veleroReactors: []reactor{ - { - verb: "create", - resource: "podvolumerestores", - reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, errors.New("fake-create-error") - }, - }, - }, - restoredPod: createPodObj(true, true, true, 2), - sourceNamespace: "fake-ns", - bsl: "fake-bsl", - runtimeScheme: scheme, - errs: []expectError{ - { - err: "fake-create-error", - }, - { - err: "fake-create-error", - }, - }, - }, { name: "create pvb fail", ctx: ctxWithCancel, @@ -407,22 +370,32 @@ func TestRestorePodVolumes(t *testing.T) { fakeClientBuilder = fakeClientBuilder.WithScheme(test.runtimeScheme) } - fakeCtlClient := fakeClientBuilder.WithRuntimeObjects(test.ctlClientObj...).Build() + objClient := append(test.ctlClientObj, test.kubeClientObj...) + objClient = append(objClient, test.veleroClientObj...) + + fakeCRClient := velerotest.NewFakeControllerRuntimeClient(t, objClient...) fakeKubeClient := kubefake.NewSimpleClientset(test.kubeClientObj...) var kubeClient kubernetes.Interface = fakeKubeClient - fakeVeleroClient := velerofake.NewSimpleClientset(test.veleroClientObj...) - for _, reactor := range test.veleroReactors { - fakeVeleroClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc) + fakeCRWatchClient := velerotest.NewFakeControllerRuntimeWatchClient(t, test.kubeClientObj...) + lw := kube.InternalLW{ + Client: fakeCRWatchClient, + Namespace: velerov1api.DefaultNamespace, + ObjectList: new(velerov1api.PodVolumeRestoreList), } - var veleroClient versioned.Interface = fakeVeleroClient - ensurer := repository.NewEnsurer(fakeCtlClient, velerotest.NewLogger(), time.Millisecond) + pvrInformer := cache.NewSharedIndexInformer(&lw, &velerov1api.PodVolumeRestore{}, 0, cache.Indexers{}) + + go pvrInformer.Run(ctx.Done()) + require.True(t, cache.WaitForCacheSync(ctx.Done(), pvrInformer.HasSynced)) + + ensurer := repository.NewEnsurer(fakeCRClient, velerotest.NewLogger(), time.Millisecond) restoreObj := builder.ForRestore(velerov1api.DefaultNamespace, "fake-restore").Result() - factory := NewRestorerFactory(repository.NewRepoLocker(), ensurer, veleroClient, kubeClient.CoreV1(), kubeClient.CoreV1(), kubeClient, velerotest.NewLogger()) + factory := NewRestorerFactory(repository.NewRepoLocker(), ensurer, kubeClient, + fakeCRClient, pvrInformer, velerotest.NewLogger()) rs, err := factory.NewRestorer(ctx, restoreObj) require.NoError(t, err) diff --git a/pkg/repository/config/aws.go b/pkg/repository/config/aws.go index cc4e14f59..d7208068f 100644 --- a/pkg/repository/config/aws.go +++ b/pkg/repository/config/aws.go @@ -21,12 +21,10 @@ import ( "context" "os" - goerr "errors" - - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/aws/aws-sdk-go-v2/aws" + awsconfig "github.com/aws/aws-sdk-go-v2/config" + s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/pkg/errors" ) @@ -64,7 +62,7 @@ func GetS3ResticEnvVars(config map[string]string) (map[string]string, error) { result[awsSecretKeyEnvVar] = creds.SecretAccessKey result[awsSessTokenEnvVar] = creds.SessionToken result[awsCredentialsFileEnvVar] = "" - result[awsProfileEnvVar] = "" + result[awsProfileEnvVar] = "" // profile is not needed since we have the credentials from profile via GetS3Credentials result[awsConfigFileEnvVar] = "" } @@ -73,27 +71,29 @@ func
GetS3ResticEnvVars(config map[string]string) (map[string]string, error) { // GetS3Credentials gets the S3 credential values according to the information // of the provided config or the system's environment variables -func GetS3Credentials(config map[string]string) (*credentials.Value, error) { +func GetS3Credentials(config map[string]string) (*aws.Credentials, error) { if os.Getenv(awsRoleEnvVar) != "" { return nil, nil } - opts := session.Options{} + var opts []func(*awsconfig.LoadOptions) error credentialsFile := config[CredentialsFileKey] if credentialsFile == "" { credentialsFile = os.Getenv(awsCredentialsFileEnvVar) } if credentialsFile != "" { - opts.SharedConfigFiles = append(opts.SharedConfigFiles, credentialsFile) - opts.SharedConfigState = session.SharedConfigEnable + opts = append(opts, awsconfig.WithSharedCredentialsFiles([]string{credentialsFile}), + // To support the existing use case where config file is passed + // as credentials of a BSL + awsconfig.WithSharedConfigFiles([]string{credentialsFile})) } + opts = append(opts, awsconfig.WithSharedConfigProfile(config[awsProfileKey])) - sess, err := session.NewSessionWithOptions(opts) + cfg, err := awsconfig.LoadDefaultConfig(context.Background(), opts...) if err != nil { return nil, err } - - creds, err := sess.Config.Credentials.Get() + creds, err := cfg.Credentials.Retrieve(context.Background()) return &creds, err } @@ -101,33 +101,17 @@ func GetS3Credentials(config map[string]string) (*credentials.Value, error) { // GetAWSBucketRegion returns the AWS region that a bucket is in, or an error // if the region cannot be determined. func GetAWSBucketRegion(bucket string) (string, error) { - sess, err := session.NewSession() + cfg, err := awsconfig.LoadDefaultConfig(context.Background()) if err != nil { return "", errors.WithStack(err) } - - var region string - var requestErrs []error - - for _, partition := range endpoints.DefaultPartitions() { - for regionHint := range partition.Regions() { - region, err = s3manager.GetBucketRegion(context.Background(), sess, bucket, regionHint) - if err != nil { - requestErrs = append(requestErrs, errors.Wrapf(err, "error to get region with hint %s", regionHint)) - } - - // we only need to try a single region hint per partition, so break after the first - break - } - - if region != "" { - return region, nil - } + client := s3.NewFromConfig(cfg) + region, err := s3manager.GetBucketRegion(context.Background(), client, bucket) + if err != nil { + return "", errors.WithStack(err) } - - if requestErrs == nil { - return "", errors.Errorf("unable to determine region by bucket %s", bucket) - } else { - return "", errors.Wrapf(goerr.Join(requestErrs...), "error to get region by bucket %s", bucket) + if region == "" { + return "", errors.New("unable to determine bucket's region") } + return region, nil } diff --git a/pkg/repository/config/aws_test.go b/pkg/repository/config/aws_test.go index bdd3e4fa2..ba7d00f6b 100644 --- a/pkg/repository/config/aws_test.go +++ b/pkg/repository/config/aws_test.go @@ -17,8 +17,11 @@ limitations under the License. 
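Note: the GetS3Credentials rewrite above replaces aws-sdk-go v1 session.Options with aws-sdk-go-v2 load options, so that a shared credentials file and a named profile both drive credential resolution. A minimal sketch of that call pattern (the file path and profile name are placeholders, not values from this patch):

package main

import (
	"context"
	"fmt"
	"log"

	awsconfig "github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	ctx := context.Background()

	// Register the same file as both a credentials file and a config file, as the
	// patched code does, so "[profile xxx]" style sections keep working when a
	// config-format file is supplied as BSL credentials.
	cfg, err := awsconfig.LoadDefaultConfig(ctx,
		awsconfig.WithSharedCredentialsFiles([]string{"/tmp/velero-aws-credentials"}), // placeholder path
		awsconfig.WithSharedConfigFiles([]string{"/tmp/velero-aws-credentials"}),
		awsconfig.WithSharedConfigProfile("some-profile"), // placeholder profile name
	)
	if err != nil {
		log.Fatal(err)
	}

	creds, err := cfg.Credentials.Retrieve(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// aws.Credentials carries Source where the v1 credentials.Value carried
	// ProviderName, which is why unified_repo.go below switches to credValue.Source.
	fmt.Println(creds.AccessKeyID, creds.Source)
}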
package config import ( + "os" + "reflect" "testing" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/stretchr/testify/require" ) @@ -63,3 +66,81 @@ func TestGetS3ResticEnvVars(t *testing.T) { }) } } + +func TestGetS3CredentialsCorrectlyUseProfile(t *testing.T) { + type args struct { + config map[string]string + secretFileContents string + } + tests := []struct { + name string + args args + want *aws.Credentials + wantErr bool + }{ + { + name: "Test GetS3Credentials use profile correctly", + args: args{ + config: map[string]string{ + "profile": "some-profile", + }, + secretFileContents: `[default] + aws_access_key_id = default-access-key-id + aws_secret_access_key = default-secret-access-key + [profile some-profile] + aws_access_key_id = some-profile-access-key-id + aws_secret_access_key = some-profile-secret-access-key + `, + }, + want: &aws.Credentials{ + AccessKeyID: "some-profile-access-key-id", + SecretAccessKey: "some-profile-secret-access-key", + }, + }, + { + name: "Test GetS3Credentials default to default profile", + args: args{ + config: map[string]string{}, + secretFileContents: `[default] + aws_access_key_id = default-access-key-id + aws_secret_access_key = default-secret-access-key + [profile some-profile] + aws_access_key_id = some-profile-access-key-id + aws_secret_access_key = some-profile-secret-access-key + `, + }, + want: &aws.Credentials{ + AccessKeyID: "default-access-key-id", + SecretAccessKey: "default-secret-access-key", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tmpFile, err := os.CreateTemp("", "velero-test-aws-credentials") + defer os.Remove(tmpFile.Name()) + if err != nil { + t.Errorf("GetS3Credentials() error = %v", err) + return + } + // write the contents of the secret file to the temp file + _, err = tmpFile.WriteString(tt.args.secretFileContents) + if err != nil { + t.Errorf("GetS3Credentials() error = %v", err) + return + } + tt.args.config["credentialsFile"] = tmpFile.Name() + got, err := GetS3Credentials(tt.args.config) + if (err != nil) != tt.wantErr { + t.Errorf("GetS3Credentials() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got.AccessKeyID, tt.want.AccessKeyID) { + t.Errorf("GetS3Credentials() got = %v, want %v", got.AccessKeyID, tt.want.AccessKeyID) + } + if !reflect.DeepEqual(got.SecretAccessKey, tt.want.SecretAccessKey) { + t.Errorf("GetS3Credentials() got = %v, want %v", got.SecretAccessKey, tt.want.SecretAccessKey) + } + }) + } +} diff --git a/pkg/repository/provider/unified_repo.go b/pkg/repository/provider/unified_repo.go index 988174fa9..76ae36351 100644 --- a/pkg/repository/provider/unified_repo.go +++ b/pkg/repository/provider/unified_repo.go @@ -53,7 +53,7 @@ var getGCPCredentials = repoconfig.GetGCPCredentials var getS3BucketRegion = repoconfig.GetAWSBucketRegion type localFuncTable struct { - getStorageVariables func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) + getStorageVariables func(*velerov1api.BackupStorageLocation, string, string, credentials.FileStore) (map[string]string, error) getStorageCredentials func(*velerov1api.BackupStorageLocation, credentials.FileStore) (map[string]string, error) } @@ -347,7 +347,7 @@ func (urp *unifiedRepoProvider) GetStoreOptions(param interface{}) (map[string]s return map[string]string{}, errors.Errorf("invalid parameter, expect %T, actual %T", RepoParam{}, param) } - storeVar, err := funcTable.getStorageVariables(repoParam.BackupLocation, urp.repoBackend, 
repoParam.BackupRepo.Spec.VolumeNamespace) + storeVar, err := funcTable.getStorageVariables(repoParam.BackupLocation, urp.repoBackend, repoParam.BackupRepo.Spec.VolumeNamespace, urp.credentialGetter.FromFile) if err != nil { return map[string]string{}, errors.Wrap(err, "error to get storage variables") } @@ -433,7 +433,7 @@ func getStorageCredentials(backupLocation *velerov1api.BackupStorageLocation, cr if credValue != nil { result[udmrepo.StoreOptionS3KeyID] = credValue.AccessKeyID - result[udmrepo.StoreOptionS3Provider] = credValue.ProviderName + result[udmrepo.StoreOptionS3Provider] = credValue.Source result[udmrepo.StoreOptionS3SecretKey] = credValue.SecretAccessKey result[udmrepo.StoreOptionS3Token] = credValue.SessionToken } @@ -447,7 +447,8 @@ func getStorageCredentials(backupLocation *velerov1api.BackupStorageLocation, cr return result, nil } -func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repoBackend string, repoName string) (map[string]string, error) { +func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repoBackend string, repoName string, + credentialFileStore credentials.FileStore) (map[string]string, error) { result := make(map[string]string) backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider, backupLocation.Spec.Config) @@ -459,6 +460,13 @@ func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repo if config == nil { config = map[string]string{} } + if backupLocation.Spec.Credential != nil { + credsFile, err := credentialFileStore.Path(backupLocation.Spec.Credential) + if err != nil { + return map[string]string{}, errors.WithStack(err) + } + config[repoconfig.CredentialsFileKey] = credsFile + } bucket := strings.Trim(config["bucket"], "/") prefix := strings.Trim(config["prefix"], "/") @@ -477,9 +485,11 @@ func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repo var err error if s3URL == "" { - region, err = getS3BucketRegion(bucket) - if err != nil { - return map[string]string{}, errors.Wrap(err, "error get s3 bucket region") + if region == "" { + region, err = getS3BucketRegion(bucket) + if err != nil { + return map[string]string{}, errors.Wrap(err, "error get s3 bucket region") + } } s3URL = fmt.Sprintf("s3-%s.amazonaws.com", region) diff --git a/pkg/repository/provider/unified_repo_test.go b/pkg/repository/provider/unified_repo_test.go index 74cdc74b2..3f8e241b9 100644 --- a/pkg/repository/provider/unified_repo_test.go +++ b/pkg/repository/provider/unified_repo_test.go @@ -22,7 +22,7 @@ import ( "errors" "testing" - awscredentials "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/kopia/kopia/repo" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -45,7 +45,7 @@ func TestGetStorageCredentials(t *testing.T) { credFileStore *credmock.FileStore credStoreError error credStorePath string - getS3Credentials func(map[string]string) (*awscredentials.Value, error) + getS3Credentials func(map[string]string) (*aws.Credentials, error) getGCPCredentials func(map[string]string) string expected map[string]string expectedErr string @@ -89,8 +89,8 @@ func TestGetStorageCredentials(t *testing.T) { }, }, }, - getS3Credentials: func(config map[string]string) (*awscredentials.Value, error) { - return &awscredentials.Value{ + getS3Credentials: func(config map[string]string) (*aws.Credentials, error) { + return &aws.Credentials{ AccessKeyID: "from: " + config["credentialsFile"], }, nil }, @@ -115,8 +115,8 @@ func 
TestGetStorageCredentials(t *testing.T) { }, credFileStore: new(credmock.FileStore), credStorePath: "credentials-from-credential-key", - getS3Credentials: func(config map[string]string) (*awscredentials.Value, error) { - return &awscredentials.Value{ + getS3Credentials: func(config map[string]string) (*aws.Credentials, error) { + return &aws.Credentials{ AccessKeyID: "from: " + config["credentialsFile"], }, nil }, @@ -138,7 +138,7 @@ func TestGetStorageCredentials(t *testing.T) { }, }, }, - getS3Credentials: func(config map[string]string) (*awscredentials.Value, error) { + getS3Credentials: func(config map[string]string) (*aws.Credentials, error) { return nil, errors.New("fake error") }, credFileStore: new(credmock.FileStore), @@ -153,7 +153,7 @@ func TestGetStorageCredentials(t *testing.T) { Config: map[string]string{}, }, }, - getS3Credentials: func(config map[string]string) (*awscredentials.Value, error) { + getS3Credentials: func(config map[string]string) (*aws.Credentials, error) { return nil, nil }, credFileStore: new(credmock.FileStore), @@ -437,11 +437,12 @@ func TestGetStorageVariables(t *testing.T) { }, } + credFileStore := new(credmock.FileStore) for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { getS3BucketRegion = tc.getS3BucketRegion - actual, err := getStorageVariables(&tc.backupLocation, tc.repoBackend, tc.repoName) + actual, err := getStorageVariables(&tc.backupLocation, tc.repoBackend, tc.repoName, credFileStore) require.Equal(t, tc.expected, actual) @@ -530,7 +531,7 @@ func TestGetStoreOptions(t *testing.T) { BackupRepo: &velerov1api.BackupRepository{}, }, funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, errors.New("fake-error-2") }, }, @@ -544,7 +545,7 @@ func TestGetStoreOptions(t *testing.T) { BackupRepo: &velerov1api.BackupRepository{}, }, funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -604,7 +605,7 @@ func TestPrepareRepo(t *testing.T) { repoService: new(reposervicenmocks.BackupRepoService), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, errors.New("fake-store-option-error") }, }, @@ -615,7 +616,7 @@ func TestPrepareRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -635,7 
+636,7 @@ func TestPrepareRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -656,7 +657,7 @@ func TestPrepareRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -733,7 +734,7 @@ func TestForget(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -757,7 +758,7 @@ func TestForget(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -785,7 +786,7 @@ func TestForget(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -877,7 +878,7 @@ func TestInitRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -895,7 +896,7 @@ func TestInitRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) 
(map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -965,7 +966,7 @@ func TestConnectToRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -983,7 +984,7 @@ func TestConnectToRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -1057,7 +1058,7 @@ func TestBoostRepoConnect(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -1084,7 +1085,7 @@ func TestBoostRepoConnect(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -1110,7 +1111,7 @@ func TestBoostRepoConnect(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -1197,7 +1198,7 @@ func TestPruneRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return 
map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { @@ -1215,7 +1216,7 @@ func TestPruneRepo(t *testing.T) { getter: new(credmock.SecretStore), credStoreReturn: "fake-password", funcTable: localFuncTable{ - getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string) (map[string]string, error) { + getStorageVariables: func(*velerov1api.BackupStorageLocation, string, string, velerocredentials.FileStore) (map[string]string, error) { return map[string]string{}, nil }, getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { diff --git a/pkg/restore/pod_volume_restore_action.go b/pkg/restore/pod_volume_restore_action.go index e5d77b18e..af5c2373c 100644 --- a/pkg/restore/pod_volume_restore_action.go +++ b/pkg/restore/pod_volume_restore_action.go @@ -27,11 +27,11 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" veleroimage "github.com/vmware-tanzu/velero/internal/velero" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" - velerov1client "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1" "github.com/vmware-tanzu/velero/pkg/label" "github.com/vmware-tanzu/velero/pkg/plugin/framework/common" "github.com/vmware-tanzu/velero/pkg/plugin/velero" @@ -47,16 +47,16 @@ const ( ) type PodVolumeRestoreAction struct { - logger logrus.FieldLogger - client corev1client.ConfigMapInterface - podVolumeBackupClient velerov1client.PodVolumeBackupInterface + logger logrus.FieldLogger + client corev1client.ConfigMapInterface + crClient ctrlclient.Client } -func NewPodVolumeRestoreAction(logger logrus.FieldLogger, client corev1client.ConfigMapInterface, podVolumeBackupClient velerov1client.PodVolumeBackupInterface) *PodVolumeRestoreAction { +func NewPodVolumeRestoreAction(logger logrus.FieldLogger, client corev1client.ConfigMapInterface, crClient ctrlclient.Client) *PodVolumeRestoreAction { return &PodVolumeRestoreAction{ - logger: logger, - client: client, - podVolumeBackupClient: podVolumeBackupClient, + logger: logger, + client: client, + crClient: crClient, } } @@ -86,9 +86,11 @@ func (a *PodVolumeRestoreAction) Execute(input *velero.RestoreItemActionExecuteI log := a.logger.WithField("pod", kube.NamespaceAndName(&pod)) - opts := label.NewListOptionsForBackup(input.Restore.Spec.BackupName) - podVolumeBackupList, err := a.podVolumeBackupClient.List(context.TODO(), opts) - if err != nil { + opts := &ctrlclient.ListOptions{ + LabelSelector: label.NewSelectorForBackup(input.Restore.Spec.BackupName), + } + podVolumeBackupList := new(velerov1api.PodVolumeBackupList) + if err := a.crClient.List(context.TODO(), podVolumeBackupList, opts); err != nil { return nil, errors.WithStack(err) } diff --git a/pkg/restore/pod_volume_restore_action_test.go b/pkg/restore/pod_volume_restore_action_test.go index f57ffdc69..cba4914e5 100644 --- a/pkg/restore/pod_volume_restore_action_test.go +++ b/pkg/restore/pod_volume_restore_action_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package restore import ( - "context" "sort" "testing" @@ -25,7 +24,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" corev1api "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" @@ -34,7 +32,6 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/buildinfo" - velerofake "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/fake" "github.com/vmware-tanzu/velero/pkg/plugin/velero" velerotest "github.com/vmware-tanzu/velero/pkg/test" "github.com/vmware-tanzu/velero/pkg/util/kube" @@ -131,7 +128,7 @@ func TestPodVolumeRestoreActionExecute(t *testing.T) { name string pod *corev1api.Pod podFromBackup *corev1api.Pod - podVolumeBackups []*velerov1api.PodVolumeBackup + podVolumeBackups []runtime.Object want *corev1api.Pod }{ { @@ -179,7 +176,7 @@ func TestPodVolumeRestoreActionExecute(t *testing.T) { builder.WithAnnotations("snapshot.velero.io/not-used", "")). InitContainers(builder.ForContainer("first-container", "").Result()). Result(), - podVolumeBackups: []*velerov1api.PodVolumeBackup{ + podVolumeBackups: []runtime.Object{ builder.ForPodVolumeBackup(veleroNs, "pvb-1"). PodName("my-pod"). PodNamespace("ns-1"). @@ -225,7 +222,7 @@ func TestPodVolumeRestoreActionExecute(t *testing.T) { builder.ForVolume("vol-2").PersistentVolumeClaimSource("pvc-2").Result(), ). Result(), - podVolumeBackups: []*velerov1api.PodVolumeBackup{ + podVolumeBackups: []runtime.Object{ builder.ForPodVolumeBackup(veleroNs, "pvb-1"). PodName("my-pod"). PodNamespace("original-ns"). @@ -259,12 +256,7 @@ func TestPodVolumeRestoreActionExecute(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { clientset := fake.NewSimpleClientset() - clientsetVelero := velerofake.NewSimpleClientset() - - for _, podVolumeBackup := range tc.podVolumeBackups { - _, err := clientsetVelero.VeleroV1().PodVolumeBackups(veleroNs).Create(context.TODO(), podVolumeBackup, metav1.CreateOptions{}) - require.NoError(t, err) - } + crClient := velerotest.NewFakeControllerRuntimeClient(t, tc.podVolumeBackups...) 
unstructuredPod, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pod) require.NoError(t, err) @@ -294,7 +286,7 @@ func TestPodVolumeRestoreActionExecute(t *testing.T) { a := NewPodVolumeRestoreAction( logrus.StandardLogger(), clientset.CoreV1().ConfigMaps(veleroNs), - clientsetVelero.VeleroV1().PodVolumeBackups(veleroNs), + crClient, ) // method under test diff --git a/pkg/restore/pv_restorer_test.go b/pkg/restore/pv_restorer_test.go index db92f6e5c..0d49ba4b8 100644 --- a/pkg/restore/pv_restorer_test.go +++ b/pkg/restore/pv_restorer_test.go @@ -28,8 +28,6 @@ import ( api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" - "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/fake" - informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions" providermocks "github.com/vmware-tanzu/velero/pkg/plugin/velero/mocks/volumesnapshotter/v1" vsv1 "github.com/vmware-tanzu/velero/pkg/plugin/velero/volumesnapshotter/v1" velerotest "github.com/vmware-tanzu/velero/pkg/test" @@ -116,11 +114,6 @@ func TestExecutePVAction_NoSnapshotRestores(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - var ( - client = fake.NewSimpleClientset() - snapshotLocationInformer = informers.NewSharedInformerFactory(client, 0).Velero().V1().VolumeSnapshotLocations() - ) - r := &pvRestorer{ logger: velerotest.NewLogger(), restorePVs: tc.restore.Spec.RestorePVs, @@ -132,7 +125,7 @@ func TestExecutePVAction_NoSnapshotRestores(t *testing.T) { } for _, loc := range tc.locations { - require.NoError(t, snapshotLocationInformer.Informer().GetStore().Add(loc)) + require.NoError(t, r.kbclient.Create(context.TODO(), loc)) } res, err := r.executePVAction(tc.obj) diff --git a/pkg/restore/request.go b/pkg/restore/request.go index dcc2ef3d6..2a267a5ff 100644 --- a/pkg/restore/request.go +++ b/pkg/restore/request.go @@ -21,6 +21,7 @@ import ( "io" "sort" + snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/runtime" @@ -60,6 +61,7 @@ type Request struct { itemOperationsList *[]*itemoperation.RestoreOperation ResourceModifiers *resourcemodifiers.ResourceModifiers DisableInformerCache bool + CSIVolumeSnapshots []*snapshotv1api.VolumeSnapshot } type restoredItemStatus struct { diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index 835553330..ea0af47c9 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -30,6 +30,7 @@ import ( "time" "github.com/google/uuid" + snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" @@ -68,6 +69,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/collections" + csiutil "github.com/vmware-tanzu/velero/pkg/util/csi" "github.com/vmware-tanzu/velero/pkg/util/filesystem" "github.com/vmware-tanzu/velero/pkg/util/kube" "github.com/vmware-tanzu/velero/pkg/util/results" @@ -113,6 +115,7 @@ type kubernetesRestorer struct { podGetter cache.Getter credentialFileStore credentials.FileStore kbClient crclient.Client + featureVerifier features.Verifier } // NewKubernetesRestorer creates a new kubernetesRestorer. 
@@ -130,6 +133,7 @@ func NewKubernetesRestorer( podGetter cache.Getter, credentialStore credentials.FileStore, kbClient crclient.Client, + featureVerifier features.Verifier, ) (Restorer, error) { return &kubernetesRestorer{ discoveryHelper: discoveryHelper, @@ -154,6 +158,7 @@ func NewKubernetesRestorer( podGetter: podGetter, credentialFileStore: credentialStore, kbClient: kbClient, + featureVerifier: featureVerifier, }, nil } @@ -298,6 +303,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers( pvsToProvision: sets.NewString(), pvRestorer: pvRestorer, volumeSnapshots: req.VolumeSnapshots, + csiVolumeSnapshots: req.CSIVolumeSnapshots, podVolumeBackups: req.PodVolumeBackups, resourceTerminatingTimeout: kr.resourceTerminatingTimeout, resourceTimeout: kr.resourceTimeout, @@ -310,7 +316,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers( discoveryHelper: kr.discoveryHelper, resourcePriorities: kr.resourcePriorities, resourceRestoreHooks: resourceRestoreHooks, - hooksErrs: make(chan error), + hooksErrs: make(chan hook.HookErrInfo), waitExecHookHandler: waitExecHookHandler, hooksContext: hooksCtx, hooksCancelFunc: hooksCancelFunc, @@ -318,6 +324,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers( itemOperationsList: req.GetItemOperationsList(), resourceModifiers: req.ResourceModifiers, disableInformerCache: req.DisableInformerCache, + featureVerifier: kr.featureVerifier, } return restoreCtx.execute() @@ -347,6 +354,7 @@ type restoreContext struct { pvsToProvision sets.String pvRestorer PVRestorer volumeSnapshots []*volume.Snapshot + csiVolumeSnapshots []*snapshotv1api.VolumeSnapshot podVolumeBackups []*velerov1api.PodVolumeBackup resourceTerminatingTimeout time.Duration resourceTimeout time.Duration @@ -359,7 +367,7 @@ type restoreContext struct { discoveryHelper discovery.Helper resourcePriorities Priorities hooksWaitGroup sync.WaitGroup - hooksErrs chan error + hooksErrs chan hook.HookErrInfo resourceRestoreHooks []hook.ResourceRestoreHook waitExecHookHandler hook.WaitExecHookHandler hooksContext go_context.Context @@ -368,6 +376,7 @@ type restoreContext struct { itemOperationsList *[]*itemoperation.RestoreOperation resourceModifiers *resourcemodifiers.ResourceModifiers disableInformerCache bool + featureVerifier features.Verifier } type resourceClientKey struct { @@ -654,8 +663,8 @@ func (ctx *restoreContext) execute() (results.Result, results.Result) { ctx.hooksWaitGroup.Wait() close(ctx.hooksErrs) }() - for err := range ctx.hooksErrs { - errs.Velero = append(errs.Velero, err.Error()) + for errInfo := range ctx.hooksErrs { + errs.Add(errInfo.Namespace, errInfo.Err) } ctx.log.Info("Done waiting for all post-restore exec hooks to complete") @@ -1287,15 +1296,57 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso } case hasPodVolumeBackup(obj, ctx): - ctx.log.Infof("Dynamically re-provisioning persistent volume because it has a pod volume backup to be restored.") + ctx.log.WithFields(logrus.Fields{ + "namespace": obj.GetNamespace(), + "name": obj.GetName(), + "groupResource": groupResource.String(), + }).Infof("Dynamically re-provisioning persistent volume because it has a pod volume backup to be restored.") ctx.pvsToProvision.Insert(name) // Return early because we don't want to restore the PV itself, we // want to dynamically re-provision it. 
return warnings, errs, itemExists + case hasCSIVolumeSnapshot(ctx, obj): + ctx.log.WithFields(logrus.Fields{ + "namespace": obj.GetNamespace(), + "name": obj.GetName(), + "groupResource": groupResource.String(), + }).Infof("Dynamically re-provisioning persistent volume because it has a related CSI VolumeSnapshot.") + ctx.pvsToProvision.Insert(name) + + if ready, err := ctx.featureVerifier.Verify(velerov1api.CSIFeatureFlag); !ready { + ctx.log.Errorf("Failed to verify CSI modules, ready %v, err %v", ready, err) + errs.Add(namespace, fmt.Errorf("CSI modules are not ready for restore. Check CSI feature is enabled and CSI plugin is installed")) + } + + // Return early because we don't want to restore the PV itself, we + // want to dynamically re-provision it. + return warnings, errs, itemExists + + case hasSnapshotDataUpload(ctx, obj): + ctx.log.WithFields(logrus.Fields{ + "namespace": obj.GetNamespace(), + "name": obj.GetName(), + "groupResource": groupResource.String(), + }).Infof("Dynamically re-provisioning persistent volume because it has a related snapshot DataUpload.") + ctx.pvsToProvision.Insert(name) + + if ready, err := ctx.featureVerifier.Verify(velerov1api.CSIFeatureFlag); !ready { + ctx.log.Errorf("Failed to verify CSI modules, ready %v, err %v", ready, err) + errs.Add(namespace, fmt.Errorf("CSI modules are not ready for restore. Check CSI feature is enabled and CSI plugin is installed")) + } + + // Return early because we don't want to restore the PV itself, we + // want to dynamically re-provision it. + return warnings, errs, itemExists + case hasDeleteReclaimPolicy(obj.Object): - ctx.log.Infof("Dynamically re-provisioning persistent volume because it doesn't have a snapshot and its reclaim policy is Delete.") + ctx.log.WithFields(logrus.Fields{ + "namespace": obj.GetNamespace(), + "name": obj.GetName(), + "groupResource": groupResource.String(), + }).Infof("Dynamically re-provisioning persistent volume because it doesn't have a snapshot and its reclaim policy is Delete.") ctx.pvsToProvision.Insert(name) // Return early because we don't want to restore the PV itself, we @@ -1303,7 +1354,11 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso return warnings, errs, itemExists default: - ctx.log.Infof("Restoring persistent volume as-is because it doesn't have a snapshot and its reclaim policy is not Delete.") + ctx.log.WithFields(logrus.Fields{ + "namespace": obj.GetNamespace(), + "name": obj.GetName(), + "groupResource": groupResource.String(), + }).Infof("Restoring persistent volume as-is because it doesn't have a snapshot and its reclaim policy is not Delete.") // Check to see if the claimRef.namespace field needs to be remapped, and do so if necessary. _, err = remapClaimRefNS(ctx, obj) @@ -1337,6 +1392,13 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso continue } + // If the EnableCSI feature is not enabled, but the executing action is from CSI plugin, skip the action. + if csiutil.ShouldSkipAction(action.Name()) { + ctx.log.Infof("Skip action %s for resource %s:%s/%s, because the CSI feature is not enabled. 
Feature setting is %s.", + action.Name(), groupResource.String(), obj.GetNamespace(), obj.GetName(), features.Serialize()) + continue + } + ctx.log.Infof("Executing item action for %v", &groupResource) executeOutput, err := action.RestoreItemAction.Execute(&velero.RestoreItemActionExecuteInput{ Item: obj, @@ -1466,7 +1528,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso } if ctx.resourceModifiers != nil { - if errList := ctx.resourceModifiers.ApplyResourceModifierRules(obj, groupResource.String(), ctx.log); errList != nil { + if errList := ctx.resourceModifiers.ApplyResourceModifierRules(obj, groupResource.String(), ctx.kbClient.Scheme(), ctx.log); errList != nil { for _, err := range errList { errs.Add(namespace, err) } @@ -1524,18 +1586,17 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso } if restoreErr != nil { - // check for the existence of the object in cluster, if no error then it implies that object exists - // and if err then we want to judge whether there is an existing error in the previous creation. - // if so, we will return the 'get' error. - // otherwise, we will return the original creation error. + // Check for the existence of the object that failed creation with an AlreadyExists error: if the Get succeeds, the object exists in the cluster. + // If the Get also fails, itemExists stays false because neither the create call nor the Get call could confirm the object's existence. + // In that case the Get error is returned as a warning to notify the user that the object may exist in the cluster but could not be confirmed. if !ctx.disableInformerCache { fromCluster, err = ctx.getResource(groupResource, obj, namespace, name) } else { fromCluster, err = resourceClient.Get(name, metav1.GetOptions{}) } if err != nil && isAlreadyExistsError { - ctx.log.Errorf("Error retrieving in-cluster version of %s: %v", kube.NamespaceAndName(obj), err) - errs.Add(namespace, err) + ctx.log.Warnf("Unable to retrieve in-cluster version of %s: %v, object won't be restored by velero or have restore labels, and existing resource policy is not applied", kube.NamespaceAndName(obj), err) + warnings.Add(namespace, err) return warnings, errs, itemExists } } @@ -1891,10 +1952,11 @@ func (ctx *restoreContext) waitExec(createdObj *unstructured.Unstructured) { // on the ctx.podVolumeErrs channel. defer ctx.hooksWaitGroup.Done() + podNs := createdObj.GetNamespace() pod := new(v1.Pod) if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil { ctx.log.WithError(err).Error("error converting unstructured pod") - ctx.hooksErrs <- err + ctx.hooksErrs <- hook.HookErrInfo{Namespace: podNs, Err: err} return } execHooksByContainer, err := hook.GroupRestoreExecHooks( @@ -1904,7 +1966,7 @@ func (ctx *restoreContext) waitExec(createdObj *unstructured.Unstructured) { ) if err != nil { ctx.log.WithError(err).Errorf("error getting exec hooks for pod %s/%s", pod.Namespace, pod.Name) - ctx.hooksErrs <- err + ctx.hooksErrs <- hook.HookErrInfo{Namespace: podNs, Err: err} return } @@ -1914,7 +1976,7 @@ func (ctx *restoreContext) waitExec(createdObj *unstructured.Unstructured) { for _, err := range errs { // Errors are already logged in the HandleHooks method.
- ctx.hooksErrs <- err + ctx.hooksErrs <- hook.HookErrInfo{Namespace: podNs, Err: err} } } }() @@ -1930,6 +1992,55 @@ func hasSnapshot(pvName string, snapshots []*volume.Snapshot) bool { return false } +func hasCSIVolumeSnapshot(ctx *restoreContext, unstructuredPV *unstructured.Unstructured) bool { + pv := new(v1.PersistentVolume) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredPV.Object, pv); err != nil { + ctx.log.WithError(err).Warnf("Unable to convert PV from unstructured to structured") + return false + } + + for _, vs := range ctx.csiVolumeSnapshots { + if pv.Spec.ClaimRef.Name == *vs.Spec.Source.PersistentVolumeClaimName && + pv.Spec.ClaimRef.Namespace == vs.Namespace { + return true + } + } + return false +} + +func hasSnapshotDataUpload(ctx *restoreContext, unstructuredPV *unstructured.Unstructured) bool { + pv := new(v1.PersistentVolume) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredPV.Object, pv); err != nil { + ctx.log.WithError(err).Warnf("Unable to convert PV from unstructured to structured") + return false + } + + if pv.Spec.ClaimRef == nil { + return false + } + + dataUploadResultList := new(v1.ConfigMapList) + err := ctx.kbClient.List(go_context.TODO(), dataUploadResultList, &crclient.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + velerov1api.RestoreUIDLabel: label.GetValidName(string(ctx.restore.GetUID())), + velerov1api.PVCNamespaceNameLabel: label.GetValidName(pv.Spec.ClaimRef.Namespace + "." + pv.Spec.ClaimRef.Name), + velerov1api.ResourceUsageLabel: label.GetValidName(string(velerov1api.VeleroResourceUsageDataUploadResult)), + }), + }) + if err != nil { + ctx.log.WithError(err).Warnf("Failed to list DataUpload result CM.") + return false + } + + if len(dataUploadResultList.Items) != 1 { + ctx.log.WithError(fmt.Errorf("dataupload result number is not expected")). + Warnf("Got %d DataUpload results. Expected one.", len(dataUploadResultList.Items)) + return false + } + + return true +} + func hasPodVolumeBackup(unstructuredPV *unstructured.Unstructured, ctx *restoreContext) bool { if len(ctx.podVolumeBackups) == 0 { return false diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index 4ffd76257..d2f86e3c7 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -25,9 +25,11 @@ import ( "testing" "time" + snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" corev1api "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -45,6 +47,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/builder" "github.com/vmware-tanzu/velero/pkg/client" "github.com/vmware-tanzu/velero/pkg/discovery" + verifiermocks "github.com/vmware-tanzu/velero/pkg/features/mocks" "github.com/vmware-tanzu/velero/pkg/itemoperation" "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" @@ -2256,6 +2259,7 @@ func (*volumeSnapshotter) DeleteSnapshot(snapshotID string) error { // Verification is done by looking at the contents of the API and the metadata/spec/status of // the items in the API.
func TestRestorePersistentVolumes(t *testing.T) { + testPVCName := "testPVC" tests := []struct { name string restore *velerov1api.Restore @@ -2265,9 +2269,12 @@ func TestRestorePersistentVolumes(t *testing.T) { volumeSnapshots []*volume.Snapshot volumeSnapshotLocations []*velerov1api.VolumeSnapshotLocation volumeSnapshotterGetter volumeSnapshotterGetter + csiVolumeSnapshots []*snapshotv1api.VolumeSnapshot + dataUploadResult *corev1api.ConfigMap want []*test.APIResource wantError bool wantWarning bool + csiFeatureVerifierErr string }{ { name: "when a PV with a reclaim policy of delete has no snapshot and does not exist in-cluster, it does not get restored, and its PVC gets reset for dynamic provisioning", @@ -2923,6 +2930,152 @@ func TestRestorePersistentVolumes(t *testing.T) { ), }, }, + { + name: "when a PV with a reclaim policy of retain has a CSI VolumeSnapshot and does not exist in-cluster, the PV is not restored", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: test.NewTarWriter(t). + AddItems("persistentvolumes", + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + ClaimRef("velero", testPVCName). + Result(), + ). + Done(), + apiResources: []*test.APIResource{ + test.PVs(), + test.PVCs(), + }, + csiVolumeSnapshots: []*snapshotv1api.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "velero", + Name: "test", + }, + Spec: snapshotv1api.VolumeSnapshotSpec{ + Source: snapshotv1api.VolumeSnapshotSource{ + PersistentVolumeClaimName: &testPVCName, + }, + }, + }, + }, + volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), + }, + volumeSnapshotterGetter: map[string]vsv1.VolumeSnapshotter{ + "provider-1": &volumeSnapshotter{ + snapshotVolumes: map[string]string{"snapshot-1": "new-volume"}, + }, + }, + want: []*test.APIResource{}, + }, + { + name: "when a PV has a CSI VolumeSnapshot, but CSI modules are not ready, the PV is not restored", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: test.NewTarWriter(t). + AddItems("persistentvolumes", + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + ClaimRef("velero", testPVCName). + Result(), + ). + Done(), + apiResources: []*test.APIResource{ + test.PVs(), + test.PVCs(), + }, + csiVolumeSnapshots: []*snapshotv1api.VolumeSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "velero", + Name: "test", + }, + Spec: snapshotv1api.VolumeSnapshotSpec{ + Source: snapshotv1api.VolumeSnapshotSource{ + PersistentVolumeClaimName: &testPVCName, + }, + }, + }, + }, + volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), + }, + volumeSnapshotterGetter: map[string]vsv1.VolumeSnapshotter{ + "provider-1": &volumeSnapshotter{ + snapshotVolumes: map[string]string{"snapshot-1": "new-volume"}, + }, + }, + want: []*test.APIResource{}, + csiFeatureVerifierErr: "fake-feature-check-error", + wantError: true, + }, + { + name: "when a PV with a reclaim policy of retain has a DataUpload result CM and does not exist in-cluster, the PV is not restored", + restore: defaultRestore().ObjectMeta(builder.WithUID("fakeUID")).Result(), + backup: defaultBackup().Result(), + tarball: test.NewTarWriter(t). 
+ AddItems("persistentvolumes", + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + ClaimRef("velero", testPVCName). + Result(), + ). + Done(), + apiResources: []*test.APIResource{ + test.PVs(), + test.PVCs(), + test.ConfigMaps(), + }, + volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), + }, + volumeSnapshotterGetter: map[string]vsv1.VolumeSnapshotter{ + "provider-1": &volumeSnapshotter{ + snapshotVolumes: map[string]string{"snapshot-1": "new-volume"}, + }, + }, + dataUploadResult: builder.ForConfigMap("velero", "test").ObjectMeta(builder.WithLabelsMap(map[string]string{ + velerov1api.RestoreUIDLabel: "fakeUID", + velerov1api.PVCNamespaceNameLabel: "velero.testPVC", + velerov1api.ResourceUsageLabel: string(velerov1api.VeleroResourceUsageDataUploadResult), + })).Result(), + want: []*test.APIResource{}, + }, + { + name: "when a PV has a DataUpload result CM, but CSI modules are not ready, the PV is not restored", + restore: defaultRestore().ObjectMeta(builder.WithUID("fakeUID")).Result(), + backup: defaultBackup().Result(), + tarball: test.NewTarWriter(t). + AddItems("persistentvolumes", + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + ClaimRef("velero", testPVCName). + Result(), + ). + Done(), + apiResources: []*test.APIResource{ + test.PVs(), + test.PVCs(), + test.ConfigMaps(), + }, + volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), + }, + volumeSnapshotterGetter: map[string]vsv1.VolumeSnapshotter{ + "provider-1": &volumeSnapshotter{ + snapshotVolumes: map[string]string{"snapshot-1": "new-volume"}, + }, + }, + dataUploadResult: builder.ForConfigMap("velero", "test").ObjectMeta(builder.WithLabelsMap(map[string]string{ + velerov1api.RestoreUIDLabel: "fakeUID", + velerov1api.PVCNamespaceNameLabel: "velero.testPVC", + velerov1api.ResourceUsageLabel: string(velerov1api.VeleroResourceUsageDataUploadResult), + })).Result(), + want: []*test.APIResource{}, + csiFeatureVerifierErr: "fake-feature-check-error", + wantError: true, + }, } for _, tc := range tests { @@ -2934,11 +3087,23 @@ func TestRestorePersistentVolumes(t *testing.T) { return renamed, nil } + verifierMock := new(verifiermocks.Verifier) + if tc.csiFeatureVerifierErr != "" { + verifierMock.On("Verify", mock.Anything, mock.Anything).Return(false, errors.New(tc.csiFeatureVerifierErr)) + } else { + verifierMock.On("Verify", mock.Anything, mock.Anything).Return(true, nil) + } + h.restorer.featureVerifier = verifierMock + // set up the VolumeSnapshotLocation client and add test data to it for _, vsl := range tc.volumeSnapshotLocations { require.NoError(t, h.restorer.kbClient.Create(context.Background(), vsl)) } + if tc.dataUploadResult != nil { + require.NoError(t, h.restorer.kbClient.Create(context.TODO(), tc.dataUploadResult)) + } + for _, r := range tc.apiResources { h.AddItems(t, r) } @@ -2955,11 +3120,12 @@ func TestRestorePersistentVolumes(t *testing.T) { } data := &Request{ - Log: h.log, - Restore: tc.restore, - Backup: tc.backup, - VolumeSnapshots: tc.volumeSnapshots, - BackupReader: tc.tarball, + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + VolumeSnapshots: tc.volumeSnapshots, + BackupReader: tc.tarball, + CSIVolumeSnapshots: tc.csiVolumeSnapshots, } warnings, errs := 
h.restorer.Restore( data, @@ -3652,3 +3818,175 @@ func TestIsAlreadyExistsError(t *testing.T) { }) } } + +func TestHasCSIVolumeSnapshot(t *testing.T) { + tests := []struct { + name string + vs *snapshotv1api.VolumeSnapshot + obj *unstructured.Unstructured + expectedResult bool + }{ + { + name: "Invalid PV, expect false.", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": 1, + }, + }, + expectedResult: false, + }, + { + name: "Cannot find VS, expect false", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "PersistentVolume", + "apiVersion": "v1", + "metadata": map[string]interface{}{ + "namespace": "default", + "name": "test", + }, + }, + }, + expectedResult: false, + }, + { + name: "Find VS, expect true.", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "PersistentVolume", + "apiVersion": "v1", + "metadata": map[string]interface{}{ + "namespace": "velero", + "name": "test", + }, + "spec": map[string]interface{}{ + "claimRef": map[string]interface{}{ + "namespace": "velero", + "name": "test", + }, + }, + }, + }, + vs: builder.ForVolumeSnapshot("velero", "test").SourcePVC("test").Result(), + expectedResult: true, + }, + } + + for _, tc := range tests { + h := newHarness(t) + + ctx := &restoreContext{ + log: h.log, + } + + if tc.vs != nil { + ctx.csiVolumeSnapshots = []*snapshotv1api.VolumeSnapshot{tc.vs} + } + + t.Run(tc.name, func(t *testing.T) { + require.Equal(t, tc.expectedResult, hasCSIVolumeSnapshot(ctx, tc.obj)) + }) + } +} + +func TestHasSnapshotDataUpload(t *testing.T) { + tests := []struct { + name string + duResult *corev1api.ConfigMap + obj *unstructured.Unstructured + expectedResult bool + restore *velerov1api.Restore + }{ + { + name: "Invalid PV, expect false.", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": 1, + }, + }, + expectedResult: false, + }, + { + name: "PV without ClaimRef, expect false", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "PersistentVolume", + "apiVersion": "v1", + "metadata": map[string]interface{}{ + "namespace": "default", + "name": "test", + }, + }, + }, + duResult: builder.ForConfigMap("velero", "test").Result(), + restore: builder.ForRestore("velero", "test").ObjectMeta(builder.WithUID("fakeUID")).Result(), + expectedResult: false, + }, + { + name: "Cannot find DataUploadResult CM, expect false", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "PersistentVolume", + "apiVersion": "v1", + "metadata": map[string]interface{}{ + "namespace": "default", + "name": "test", + }, + "spec": map[string]interface{}{ + "claimRef": map[string]interface{}{ + "namespace": "velero", + "name": "testPVC", + }, + }, + }, + }, + duResult: builder.ForConfigMap("velero", "test").Result(), + restore: builder.ForRestore("velero", "test").ObjectMeta(builder.WithUID("fakeUID")).Result(), + expectedResult: false, + }, + { + name: "Find DataUploadResult CM, expect true", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "PersistentVolume", + "apiVersion": "v1", + "metadata": map[string]interface{}{ + "namespace": "default", + "name": "test", + }, + "spec": map[string]interface{}{ + "claimRef": map[string]interface{}{ + "namespace": "velero", + "name": "testPVC", + }, + }, + }, + }, + duResult: builder.ForConfigMap("velero", "test").ObjectMeta(builder.WithLabelsMap(map[string]string{ + velerov1api.RestoreUIDLabel: "fakeUID", + velerov1api.PVCNamespaceNameLabel: 
"velero/testPVC", + velerov1api.ResourceUsageLabel: string(velerov1api.VeleroResourceUsageDataUploadResult), + })).Result(), + restore: builder.ForRestore("velero", "test").ObjectMeta(builder.WithUID("fakeUID")).Result(), + expectedResult: false, + }, + } + + for _, tc := range tests { + h := newHarness(t) + + ctx := &restoreContext{ + log: h.log, + kbClient: h.restorer.kbClient, + restore: tc.restore, + } + + if tc.duResult != nil { + require.NoError(t, ctx.kbClient.Create(context.TODO(), tc.duResult)) + } + + t.Run(tc.name, func(t *testing.T) { + require.Equal(t, tc.expectedResult, hasSnapshotDataUpload(ctx, tc.obj)) + }) + } +} diff --git a/pkg/restore/service_action.go b/pkg/restore/service_action.go index 2dbac3a89..6fc1b2cb4 100644 --- a/pkg/restore/service_action.go +++ b/pkg/restore/service_action.go @@ -66,6 +66,9 @@ func (a *ServiceAction) Execute(input *velero.RestoreItemActionExecuteInput) (*v if err := deleteNodePorts(service); err != nil { return nil, err } + if err := deleteHealthCheckNodePort(service); err != nil { + return nil, err + } } res, err := runtime.DefaultUnstructuredConverter.ToUnstructured(service) @@ -76,6 +79,72 @@ func (a *ServiceAction) Execute(input *velero.RestoreItemActionExecuteInput) (*v return velero.NewRestoreItemActionExecuteOutput(&unstructured.Unstructured{Object: res}), nil } +func deleteHealthCheckNodePort(service *corev1api.Service) error { + // Check service type and external traffic policy setting, + // if the setting is not applicable for HealthCheckNodePort, return early. + if service.Spec.ExternalTrafficPolicy != corev1api.ServiceExternalTrafficPolicyTypeLocal || + service.Spec.Type != corev1api.ServiceTypeLoadBalancer { + return nil + } + + // HealthCheckNodePort is already 0, return. + if service.Spec.HealthCheckNodePort == 0 { + return nil + } + + // Search HealthCheckNodePort from server's last-applied-configuration + // annotation(HealthCheckNodePort is specified by `kubectl apply` command) + lastAppliedConfig, ok := service.Annotations[annotationLastAppliedConfig] + if ok { + appliedServiceUnstructured := new(map[string]interface{}) + if err := json.Unmarshal([]byte(lastAppliedConfig), appliedServiceUnstructured); err != nil { + return errors.WithStack(err) + } + + healthCheckNodePort, exist, err := unstructured.NestedFloat64(*appliedServiceUnstructured, "spec", "healthCheckNodePort") + if err != nil { + return errors.WithStack(err) + } + + // Found healthCheckNodePort in lastAppliedConfig annotation, + // and the value is not 0. No need to delete, return. + if exist && healthCheckNodePort != 0 { + return nil + } + } + + // Search HealthCheckNodePort from ManagedFields(HealthCheckNodePort + // is specified by `kubectl apply --server-side` command). + for _, entry := range service.GetManagedFields() { + if entry.FieldsV1 == nil { + continue + } + fields := new(map[string]interface{}) + if err := json.Unmarshal(entry.FieldsV1.Raw, fields); err != nil { + return errors.WithStack(err) + } + + _, exist, err := unstructured.NestedMap(*fields, "f:spec", "f:healthCheckNodePort") + if err != nil { + return errors.WithStack(err) + } + if !exist { + continue + } + // Because the format in ManagedFields is `f:healthCheckNodePort: {}`, + // cannot get the value, check whether exists is enough. + // Found healthCheckNodePort in ManagedFields. + // No need to delete. Return. + return nil + } + + // Cannot find HealthCheckNodePort from Annotation and + // ManagedFields, which means it's auto-generated. Delete it. 
+ service.Spec.HealthCheckNodePort = 0 + + return nil +} + func deleteNodePorts(service *corev1api.Service) error { if service.Spec.Type == corev1api.ServiceTypeExternalName { return nil diff --git a/pkg/restore/service_action_test.go b/pkg/restore/service_action_test.go index d80cc1c45..be722fa2a 100644 --- a/pkg/restore/service_action_test.go +++ b/pkg/restore/service_action_test.go @@ -36,7 +36,8 @@ import ( func svcJSON(ports ...corev1api.ServicePort) string { svc := corev1api.Service{ Spec: corev1api.ServiceSpec{ - Ports: ports, + HealthCheckNodePort: 8080, + Ports: ports, }, } @@ -486,6 +487,164 @@ func TestServiceActionExecute(t *testing.T) { }, }, }, + { + name: "If PreserveNodePorts is True in restore spec then HealthCheckNodePort always preserved.", + obj: corev1api.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svc-1", + }, + Spec: corev1api.ServiceSpec{ + HealthCheckNodePort: 8080, + ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal, + Type: corev1api.ServiceTypeLoadBalancer, + Ports: []corev1api.ServicePort{ + { + Name: "http", + Port: 80, + NodePort: 8080, + }, + { + Name: "hepsiburada", + NodePort: 9025, + }, + }, + }, + }, + restore: builder.ForRestore(api.DefaultNamespace, "").PreserveNodePorts(true).Result(), + expectedRes: corev1api.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svc-1", + }, + Spec: corev1api.ServiceSpec{ + HealthCheckNodePort: 8080, + ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal, + Type: corev1api.ServiceTypeLoadBalancer, + Ports: []corev1api.ServicePort{ + { + Name: "http", + Port: 80, + NodePort: 8080, + }, + { + Name: "hepsiburada", + NodePort: 9025, + }, + }, + }, + }, + }, + { + name: "If PreserveNodePorts is False in restore spec then HealthCheckNodePort should be cleaned.", + obj: corev1api.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svc-1", + }, + Spec: corev1api.ServiceSpec{ + HealthCheckNodePort: 8080, + ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal, + Type: corev1api.ServiceTypeLoadBalancer, + }, + }, + restore: builder.ForRestore(api.DefaultNamespace, "").PreserveNodePorts(false).Result(), + expectedRes: corev1api.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svc-1", + }, + Spec: corev1api.ServiceSpec{ + HealthCheckNodePort: 0, + ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal, + Type: corev1api.ServiceTypeLoadBalancer, + }, + }, + }, + { + name: "If PreserveNodePorts is false in restore spec, but service is not expected, then HealthCheckNodePort should be kept.", + obj: corev1api.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svc-1", + }, + Spec: corev1api.ServiceSpec{ + HealthCheckNodePort: 8080, + ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeCluster, + Type: corev1api.ServiceTypeLoadBalancer, + }, + }, + restore: builder.ForRestore(api.DefaultNamespace, "").PreserveNodePorts(false).Result(), + expectedRes: corev1api.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svc-1", + }, + Spec: corev1api.ServiceSpec{ + HealthCheckNodePort: 8080, + ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeCluster, + Type: corev1api.ServiceTypeLoadBalancer, + }, + }, + }, + { + name: "If PreserveNodePorts is false in restore spec, but HealthCheckNodePort can be found in Annotation, then it should be kept.", + obj: corev1api.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svc-1", + Annotations: map[string]string{annotationLastAppliedConfig: svcJSON()}, + }, + Spec: corev1api.ServiceSpec{ + 
HealthCheckNodePort: 8080, + ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal, + Type: corev1api.ServiceTypeLoadBalancer, + }, + }, + restore: builder.ForRestore(api.DefaultNamespace, "").PreserveNodePorts(false).Result(), + expectedRes: corev1api.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svc-1", + Annotations: map[string]string{annotationLastAppliedConfig: svcJSON()}, + }, + Spec: corev1api.ServiceSpec{ + HealthCheckNodePort: 8080, + ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal, + Type: corev1api.ServiceTypeLoadBalancer, + }, + }, + }, + { + name: "If PreserveNodePorts is false in restore spec, but HealthCheckNodePort can be found in ManagedFields, then it should be kept.", + obj: corev1api.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svc-1", + ManagedFields: []metav1.ManagedFieldsEntry{ + { + FieldsV1: &metav1.FieldsV1{ + Raw: []byte(`{"f:spec":{"f:healthCheckNodePort":{}}}`), + }, + }, + }, + }, + Spec: corev1api.ServiceSpec{ + HealthCheckNodePort: 8080, + ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal, + Type: corev1api.ServiceTypeLoadBalancer, + }, + }, + restore: builder.ForRestore(api.DefaultNamespace, "").PreserveNodePorts(false).Result(), + expectedRes: corev1api.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "svc-1", + ManagedFields: []metav1.ManagedFieldsEntry{ + { + FieldsV1: &metav1.FieldsV1{ + Raw: []byte(`{"f:spec":{"f:healthCheckNodePort":{}}}`), + }, + }, + }, + }, + Spec: corev1api.ServiceSpec{ + HealthCheckNodePort: 8080, + ExternalTrafficPolicy: corev1api.ServiceExternalTrafficPolicyTypeLocal, + Type: corev1api.ServiceTypeLoadBalancer, + }, + }, + }, } for _, test := range tests { diff --git a/pkg/test/api_server.go b/pkg/test/api_server.go index c96f742e8..dd5b0a07a 100644 --- a/pkg/test/api_server.go +++ b/pkg/test/api_server.go @@ -24,14 +24,11 @@ import ( discoveryfake "k8s.io/client-go/discovery/fake" dynamicfake "k8s.io/client-go/dynamic/fake" kubefake "k8s.io/client-go/kubernetes/fake" - - "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/fake" ) // APIServer contains in-memory fakes for all of the relevant // Kubernetes API server clients. 
type APIServer struct { - VeleroClient *fake.Clientset KubeClient *kubefake.Clientset DynamicClient *dynamicfake.FakeDynamicClient DiscoveryClient *DiscoveryClient @@ -43,7 +40,6 @@ func NewAPIServer(t *testing.T) *APIServer { t.Helper() var ( - veleroClient = fake.NewSimpleClientset() kubeClient = kubefake.NewSimpleClientset() dynamicClient = dynamicfake.NewSimpleDynamicClientWithCustomListKinds(runtime.NewScheme(), map[schema.GroupVersionResource]string{ @@ -65,7 +61,6 @@ func NewAPIServer(t *testing.T) *APIServer { ) return &APIServer{ - VeleroClient: veleroClient, KubeClient: kubeClient, DynamicClient: dynamicClient, DiscoveryClient: discoveryClient, diff --git a/pkg/test/fake_controller_runtime_client.go b/pkg/test/fake_controller_runtime_client.go index b64f57809..c73ddd306 100644 --- a/pkg/test/fake_controller_runtime_client.go +++ b/pkg/test/fake_controller_runtime_client.go @@ -21,6 +21,7 @@ import ( snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" "github.com/stretchr/testify/require" + appsv1api "k8s.io/api/apps/v1" corev1api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -38,6 +39,8 @@ func NewFakeControllerRuntimeClientBuilder(t *testing.T) *k8sfake.ClientBuilder require.NoError(t, err) err = corev1api.AddToScheme(scheme) require.NoError(t, err) + err = appsv1api.AddToScheme(scheme) + require.NoError(t, err) err = snapshotv1api.AddToScheme(scheme) require.NoError(t, err) return k8sfake.NewClientBuilder().WithScheme(scheme) @@ -51,7 +54,13 @@ func NewFakeControllerRuntimeClient(t *testing.T, initObjs ...runtime.Object) cl require.NoError(t, err) err = corev1api.AddToScheme(scheme) require.NoError(t, err) + err = appsv1api.AddToScheme(scheme) + require.NoError(t, err) err = snapshotv1api.AddToScheme(scheme) require.NoError(t, err) return k8sfake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(initObjs...).Build() } + +func NewFakeControllerRuntimeWatchClient(t *testing.T, initObjs ...runtime.Object) client.WithWatch { + return NewFakeControllerRuntimeClientBuilder(t).WithRuntimeObjects(initObjs...).Build() +} diff --git a/pkg/test/mocks.go b/pkg/test/mocks.go new file mode 100644 index 000000000..9a86d2b70 --- /dev/null +++ b/pkg/test/mocks.go @@ -0,0 +1,20 @@ +package test + +import ( + snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + snapshotv1listers "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// VolumeSnapshotLister helps list VolumeSnapshots. +// All objects returned here must be treated as read-only. +// +//go:generate mockery --name VolumeSnapshotLister +type VolumeSnapshotLister interface { + // List lists all VolumeSnapshots in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*snapshotv1.VolumeSnapshot, err error) + // VolumeSnapshots returns an object that can list and get VolumeSnapshots. + VolumeSnapshots(namespace string) snapshotv1listers.VolumeSnapshotNamespaceLister + snapshotv1listers.VolumeSnapshotListerExpansion +} diff --git a/pkg/test/mocks/VolumeSnapshotLister.go b/pkg/test/mocks/VolumeSnapshotLister.go new file mode 100644 index 000000000..21e4bcb66 --- /dev/null +++ b/pkg/test/mocks/VolumeSnapshotLister.go @@ -0,0 +1,73 @@ +// Code generated by mockery v2.35.4. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + labels "k8s.io/apimachinery/pkg/labels" + + v1 "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v4/listers/volumesnapshot/v1" +) + +// VolumeSnapshotLister is an autogenerated mock type for the VolumeSnapshotLister type +type VolumeSnapshotLister struct { + mock.Mock +} + +// List provides a mock function with given fields: selector +func (_m *VolumeSnapshotLister) List(selector labels.Selector) ([]*v1.VolumeSnapshot, error) { + ret := _m.Called(selector) + + var r0 []*v1.VolumeSnapshot + var r1 error + if rf, ok := ret.Get(0).(func(labels.Selector) ([]*v1.VolumeSnapshot, error)); ok { + return rf(selector) + } + if rf, ok := ret.Get(0).(func(labels.Selector) []*v1.VolumeSnapshot); ok { + r0 = rf(selector) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*v1.VolumeSnapshot) + } + } + + if rf, ok := ret.Get(1).(func(labels.Selector) error); ok { + r1 = rf(selector) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// VolumeSnapshots provides a mock function with given fields: namespace +func (_m *VolumeSnapshotLister) VolumeSnapshots(namespace string) volumesnapshotv1.VolumeSnapshotNamespaceLister { + ret := _m.Called(namespace) + + var r0 volumesnapshotv1.VolumeSnapshotNamespaceLister + if rf, ok := ret.Get(0).(func(string) volumesnapshotv1.VolumeSnapshotNamespaceLister); ok { + r0 = rf(namespace) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(volumesnapshotv1.VolumeSnapshotNamespaceLister) + } + } + + return r0 +} + +// NewVolumeSnapshotLister creates a new instance of VolumeSnapshotLister. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewVolumeSnapshotLister(t interface { + mock.TestingT + Cleanup(func()) +}) *VolumeSnapshotLister { + mock := &VolumeSnapshotLister{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/pkg/test/resources.go b/pkg/test/resources.go index 7c2fa17f6..709497fca 100644 --- a/pkg/test/resources.go +++ b/pkg/test/resources.go @@ -142,6 +142,17 @@ func ServiceAccounts(items ...metav1.Object) *APIResource { } } +func ConfigMaps(items ...metav1.Object) *APIResource { + return &APIResource{ + Group: "", + Version: "v1", + Name: "configmaps", + ShortName: "cm", + Namespaced: true, + Items: items, + } +} + func CRDs(items ...metav1.Object) *APIResource { return &APIResource{ Group: "apiextensions.k8s.io", diff --git a/pkg/test/test_logger.go b/pkg/test/test_logger.go index d8095a79d..b890fd5da 100644 --- a/pkg/test/test_logger.go +++ b/pkg/test/test_logger.go @@ -40,7 +40,7 @@ type singleLogRecorder struct { } func (s *singleLogRecorder) Write(p []byte) (n int, err error) { - *s.buffer = string(p[:]) + *s.buffer = *s.buffer + string(p[:]) return len(p), nil } diff --git a/pkg/uploader/kopia/block_backup.go b/pkg/uploader/kopia/block_backup.go index a637925a4..ad90b723f 100644 --- a/pkg/uploader/kopia/block_backup.go +++ b/pkg/uploader/kopia/block_backup.go @@ -1,3 +1,6 @@ +//go:build !windows +// +build !windows + /* Copyright The Velero Contributors. 
diff --git a/pkg/uploader/kopia/block_backup_windows.go b/pkg/uploader/kopia/block_backup_windows.go new file mode 100644 index 000000000..e6b928456 --- /dev/null +++ b/pkg/uploader/kopia/block_backup_windows.go @@ -0,0 +1,30 @@ +//go:build windows +// +build windows + +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kopia + +import ( + "fmt" + + "github.com/kopia/kopia/fs" +) + +func getLocalBlockEntry(sourcePath string) (fs.Entry, error) { + return nil, fmt.Errorf("block mode is not supported for Windows") +} diff --git a/pkg/uploader/kopia/block_restore.go b/pkg/uploader/kopia/block_restore.go index 25d11ee24..22c8ec1fc 100644 --- a/pkg/uploader/kopia/block_restore.go +++ b/pkg/uploader/kopia/block_restore.go @@ -1,3 +1,6 @@ +//go:build !windows +// +build !windows + /* Copyright The Velero Contributors. diff --git a/pkg/uploader/kopia/block_restore_windows.go b/pkg/uploader/kopia/block_restore_windows.go new file mode 100644 index 000000000..ff6e726c8 --- /dev/null +++ b/pkg/uploader/kopia/block_restore_windows.go @@ -0,0 +1,42 @@ +//go:build windows +// +build windows + +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kopia + +import ( + "context" + "fmt" + + "github.com/kopia/kopia/fs" + "github.com/kopia/kopia/snapshot/restore" +) + +type BlockOutput struct { + *restore.FilesystemOutput + + targetFileName string +} + +func (o *BlockOutput) WriteFile(ctx context.Context, relativePath string, remoteFile fs.File) error { + return fmt.Errorf("block mode is not supported for Windows") +} + +func (o *BlockOutput) BeginDirectory(ctx context.Context, relativePath string, e fs.Directory) error { + return fmt.Errorf("block mode is not supported for Windows") +} diff --git a/pkg/uploader/kopia/shim.go b/pkg/uploader/kopia/shim.go index 94856ce97..d20b7fa01 100644 --- a/pkg/uploader/kopia/shim.go +++ b/pkg/uploader/kopia/shim.go @@ -27,6 +27,7 @@ import ( "github.com/kopia/kopia/repo" "github.com/kopia/kopia/repo/content" + "github.com/kopia/kopia/repo/content/index" "github.com/kopia/kopia/repo/manifest" "github.com/kopia/kopia/repo/object" ) @@ -140,7 +141,7 @@ func (sr *shimRepository) Refresh(ctx context.Context) error { // ContentInfo not supported func (sr *shimRepository) ContentInfo(ctx context.Context, contentID content.ID) (content.Info, error) { - return nil, errors.New("ContentInfo is not supported") + return index.Info{}, errors.New("ContentInfo is not supported") } // PrefetchContents is not supported by unified repo diff --git a/pkg/uploader/kopia/snapshot.go b/pkg/uploader/kopia/snapshot.go index 587eafa8f..a34a1e553 100644 --- a/pkg/uploader/kopia/snapshot.go +++ b/pkg/uploader/kopia/snapshot.go @@ -243,10 +243,10 @@ func SnapshotSource( mani, err := loadSnapshotFunc(ctx, rep, manifest.ID(parentSnapshot)) if err != nil { - return "", 0, errors.Wrapf(err, "Failed to load previous snapshot %v from kopia", parentSnapshot) + log.WithError(err).Warnf("Failed to load previous snapshot %v from kopia, fallback to full backup", parentSnapshot) + } else { + previous = append(previous, mani) } - - previous = append(previous, mani) } else { log.Infof("Searching for parent snapshot") diff --git a/pkg/uploader/kopia/snapshot_test.go b/pkg/uploader/kopia/snapshot_test.go index f8611df67..645434942 100644 --- a/pkg/uploader/kopia/snapshot_test.go +++ b/pkg/uploader/kopia/snapshot_test.go @@ -114,7 +114,7 @@ func TestSnapshotSource(t *testing.T) { notError: true, }, { - name: "failed to load snapshot", + name: "failed to load snapshot, should fallback to full backup and not error", args: []mockArgs{ {methodName: "LoadSnapshot", returns: []interface{}{manifest, errors.New("failed to load snapshot")}}, {methodName: "SaveSnapshot", returns: []interface{}{manifest.ID, nil}}, @@ -124,7 +124,7 @@ func TestSnapshotSource(t *testing.T) { {methodName: "Upload", returns: []interface{}{manifest, nil}}, {methodName: "Flush", returns: []interface{}{nil}}, }, - notError: false, + notError: true, }, { name: "failed to save snapshot", diff --git a/pkg/uploader/provider/kopia_test.go b/pkg/uploader/provider/kopia_test.go index 6cfc33953..c1fc95724 100644 --- a/pkg/uploader/provider/kopia_test.go +++ b/pkg/uploader/provider/kopia_test.go @@ -36,12 +36,12 @@ import ( "github.com/vmware-tanzu/velero/internal/credentials/mocks" "github.com/vmware-tanzu/velero/pkg/apis/velero/shared" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" "github.com/vmware-tanzu/velero/pkg/repository" udmrepo "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" udmrepomocks "github.com/vmware-tanzu/velero/pkg/repository/udmrepo/mocks" 
"github.com/vmware-tanzu/velero/pkg/uploader" "github.com/vmware-tanzu/velero/pkg/uploader/kopia" + "github.com/vmware-tanzu/velero/pkg/util" ) type FakeBackupProgressUpdater struct { @@ -65,7 +65,7 @@ func (f *FakeRestoreProgressUpdater) UpdateProgress(p *uploader.Progress) {} func TestRunBackup(t *testing.T) { var kp kopiaProvider kp.log = logrus.New() - updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: kp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(scheme.Scheme).Build()} + updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: kp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()} testCases := []struct { name string @@ -122,7 +122,7 @@ func TestRunBackup(t *testing.T) { func TestRunRestore(t *testing.T) { var kp kopiaProvider kp.log = logrus.New() - updater := FakeRestoreProgressUpdater{PodVolumeRestore: &velerov1api.PodVolumeRestore{}, Log: kp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(scheme.Scheme).Build()} + updater := FakeRestoreProgressUpdater{PodVolumeRestore: &velerov1api.PodVolumeRestore{}, Log: kp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()} testCases := []struct { name string diff --git a/pkg/uploader/provider/provider_test.go b/pkg/uploader/provider/provider_test.go index 19e666862..e04ff78b8 100644 --- a/pkg/uploader/provider/provider_test.go +++ b/pkg/uploader/provider/provider_test.go @@ -28,7 +28,7 @@ import ( "github.com/vmware-tanzu/velero/internal/credentials" "github.com/vmware-tanzu/velero/internal/credentials/mocks" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + "github.com/vmware-tanzu/velero/pkg/util" ) type NewUploaderProviderTestCase struct { @@ -42,7 +42,7 @@ type NewUploaderProviderTestCase struct { func TestNewUploaderProvider(t *testing.T) { // Mock objects or dependencies ctx := context.Background() - client := fake.NewClientBuilder().WithScheme(scheme.Scheme).Build() + client := fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build() repoIdentifier := "repoIdentifier" bsl := &velerov1api.BackupStorageLocation{} backupRepo := &velerov1api.BackupRepository{} diff --git a/pkg/uploader/provider/restic_test.go b/pkg/uploader/provider/restic_test.go index b619673a7..62f289968 100644 --- a/pkg/uploader/provider/restic_test.go +++ b/pkg/uploader/provider/restic_test.go @@ -34,9 +34,9 @@ import ( "github.com/vmware-tanzu/velero/pkg/apis/velero/shared" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/builder" - "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" "github.com/vmware-tanzu/velero/pkg/restic" "github.com/vmware-tanzu/velero/pkg/uploader" + "github.com/vmware-tanzu/velero/pkg/util" "github.com/vmware-tanzu/velero/pkg/util/filesystem" ) @@ -149,7 +149,7 @@ func TestResticRunBackup(t *testing.T) { tc.volMode = uploader.PersistentVolumeFilesystem } if !tc.nilUpdater { - updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: tc.rp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(scheme.Scheme).Build()} + updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: tc.rp.log, Ctx: context.Background(), Cli: 
fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()} _, _, err = tc.rp.RunBackup(context.Background(), "var", "", map[string]string{}, false, parentSnapshot, tc.volMode, shared.UploaderConfig{}, &updater) } else { _, _, err = tc.rp.RunBackup(context.Background(), "var", "", map[string]string{}, false, parentSnapshot, tc.volMode, shared.UploaderConfig{}, nil) @@ -222,7 +222,7 @@ func TestResticRunRestore(t *testing.T) { } var err error if !tc.nilUpdater { - updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: tc.rp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(scheme.Scheme).Build()} + updater := FakeBackupProgressUpdater{PodVolumeBackup: &velerov1api.PodVolumeBackup{}, Log: tc.rp.log, Ctx: context.Background(), Cli: fake.NewClientBuilder().WithScheme(util.VeleroScheme).Build()} err = tc.rp.RunRestore(context.Background(), "", "var", tc.volMode, &updater) } else { err = tc.rp.RunRestore(context.Background(), "", "var", tc.volMode, nil) diff --git a/pkg/util/csi/util.go b/pkg/util/csi/util.go new file mode 100644 index 000000000..bcc424d1b --- /dev/null +++ b/pkg/util/csi/util.go @@ -0,0 +1,32 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package csi + +import ( + "strings" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/features" +) + +const ( + csiPluginNamePrefix = "velero.io/csi-" +) + +func ShouldSkipAction(actionName string) bool { + return !features.IsEnabled(velerov1api.CSIFeatureFlag) && strings.Contains(actionName, csiPluginNamePrefix) +} diff --git a/pkg/util/csi/util_test.go b/pkg/util/csi/util_test.go new file mode 100644 index 000000000..a9b0a37aa --- /dev/null +++ b/pkg/util/csi/util_test.go @@ -0,0 +1,36 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package csi + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/vmware-tanzu/velero/pkg/features" +) + +func TestCSIFeatureNotEnabledAndPluginIsFromCSI(t *testing.T) { + + features.NewFeatureFlagSet("EnableCSI") + require.False(t, ShouldSkipAction("abc")) + require.False(t, ShouldSkipAction("velero.io/csi-pvc-backupper")) + + features.NewFeatureFlagSet("") + require.True(t, ShouldSkipAction("velero.io/csi-pvc-backupper")) + require.False(t, ShouldSkipAction("abc")) +} diff --git a/pkg/util/csi/volume_snapshot.go b/pkg/util/csi/volume_snapshot.go index 99845d78f..a8ade7acd 100644 --- a/pkg/util/csi/volume_snapshot.go +++ b/pkg/util/csi/volume_snapshot.go @@ -31,6 +31,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/stringptr" + "github.com/vmware-tanzu/velero/pkg/util/stringslice" snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" snapshotter "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/typed/volumesnapshot/v1" @@ -41,7 +42,8 @@ import ( ) const ( - waitInternal = 2 * time.Second + waitInternal = 2 * time.Second + volumeSnapshotContentProtectFinalizer = "velero.io/volume-snapshot-content-protect-finalizer" ) // WaitVolumeSnapshotReady waits a VS to become ready to use until the timeout reaches @@ -97,36 +99,17 @@ func GetVolumeSnapshotContentForVolumeSnapshot(volSnap *snapshotv1api.VolumeSnap return vsc, nil } -// RetainVSC updates the VSC's deletion policy to Retain and return the update VSC +// RetainVSC updates the VSC's deletion policy to Retain and add a finalier and then return the update VSC func RetainVSC(ctx context.Context, snapshotClient snapshotter.SnapshotV1Interface, vsc *snapshotv1api.VolumeSnapshotContent) (*snapshotv1api.VolumeSnapshotContent, error) { if vsc.Spec.DeletionPolicy == snapshotv1api.VolumeSnapshotContentRetain { return vsc, nil } - origBytes, err := json.Marshal(vsc) - if err != nil { - return nil, errors.Wrap(err, "error marshaling original VSC") - } - updated := vsc.DeepCopy() - updated.Spec.DeletionPolicy = snapshotv1api.VolumeSnapshotContentRetain - - updatedBytes, err := json.Marshal(updated) - if err != nil { - return nil, errors.Wrap(err, "error marshaling updated VSC") - } - - patchBytes, err := jsonpatch.CreateMergePatch(origBytes, updatedBytes) - if err != nil { - return nil, errors.Wrap(err, "error creating json merge patch for VSC") - } - - retained, err := snapshotClient.VolumeSnapshotContents().Patch(ctx, vsc.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) - if err != nil { - return nil, errors.Wrap(err, "error patching VSC") - } - - return retained, nil + return patchVSC(ctx, snapshotClient, vsc, func(updated *snapshotv1api.VolumeSnapshotContent) { + updated.Spec.DeletionPolicy = snapshotv1api.VolumeSnapshotContentRetain + updated.Finalizers = append(updated.Finalizers, volumeSnapshotContentProtectFinalizer) + }) } // DeleteVolumeSnapshotContentIfAny deletes a VSC by name if it exists, and log an error when the deletion fails @@ -169,11 +152,35 @@ func EnsureDeleteVS(ctx context.Context, snapshotClient snapshotter.SnapshotV1In return nil } +func RemoveVSCProtect(ctx context.Context, snapshotClient snapshotter.SnapshotV1Interface, vscName string, timeout time.Duration) error { + err := wait.PollImmediate(waitInternal, timeout, func() (bool, error) { + vsc, err := snapshotClient.VolumeSnapshotContents().Get(ctx, vscName, metav1.GetOptions{}) + if err != nil { + 
return false, errors.Wrapf(err, "error to get VolumeSnapshotContent %s", vscName) + } + + vsc.Finalizers = stringslice.Except(vsc.Finalizers, volumeSnapshotContentProtectFinalizer) + + _, err = snapshotClient.VolumeSnapshotContents().Update(ctx, vsc, metav1.UpdateOptions{}) + if err == nil { + return true, nil + } + + if !apierrors.IsConflict(err) { + return false, errors.Wrapf(err, "error to update VolumeSnapshotContent %s", vscName) + } + + return false, nil + }) + + return err +} + // EnsureDeleteVSC asserts the existence of a VSC by name, deletes it and waits for its disappearance and returns errors on any failure func EnsureDeleteVSC(ctx context.Context, snapshotClient snapshotter.SnapshotV1Interface, vscName string, timeout time.Duration) error { err := snapshotClient.VolumeSnapshotContents().Delete(ctx, vscName, metav1.DeleteOptions{}) - if err != nil { + if err != nil && !apierrors.IsNotFound(err) { return errors.Wrap(err, "error to delete volume snapshot content") } @@ -208,3 +215,31 @@ func DeleteVolumeSnapshotIfAny(ctx context.Context, snapshotClient snapshotter.S } } } + +func patchVSC(ctx context.Context, snapshotClient snapshotter.SnapshotV1Interface, + vsc *snapshotv1api.VolumeSnapshotContent, updateFunc func(*snapshotv1api.VolumeSnapshotContent)) (*snapshotv1api.VolumeSnapshotContent, error) { + origBytes, err := json.Marshal(vsc) + if err != nil { + return nil, errors.Wrap(err, "error marshaling original VSC") + } + + updated := vsc.DeepCopy() + updateFunc(updated) + + updatedBytes, err := json.Marshal(updated) + if err != nil { + return nil, errors.Wrap(err, "error marshaling updated VSC") + } + + patchBytes, err := jsonpatch.CreateMergePatch(origBytes, updatedBytes) + if err != nil { + return nil, errors.Wrap(err, "error creating json merge patch for VSC") + } + + patched, err := snapshotClient.VolumeSnapshotContents().Patch(ctx, vsc.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) + if err != nil { + return nil, errors.Wrap(err, "error patching VSC") + } + + return patched, nil +} diff --git a/pkg/util/csi/volume_snapshot_test.go b/pkg/util/csi/volume_snapshot_test.go index 43ed9a316..4fbac34f5 100644 --- a/pkg/util/csi/volume_snapshot_test.go +++ b/pkg/util/csi/volume_snapshot_test.go @@ -34,6 +34,8 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/stringptr" velerotest "github.com/vmware-tanzu/velero/pkg/test" + + apierrors "k8s.io/apimachinery/pkg/api/errors" ) type reactor struct { @@ -364,9 +366,23 @@ func TestEnsureDeleteVSC(t *testing.T) { err string }{ { - name: "delete fail", + name: "delete fail on VSC not found", vscName: "fake-vsc", - err: "error to delete volume snapshot content: volumesnapshotcontents.snapshot.storage.k8s.io \"fake-vsc\" not found", + }, + { + name: "delete fail on others", + vscName: "fake-vsc", + clientObj: []runtime.Object{vscObj}, + reactors: []reactor{ + { + verb: "delete", + resource: "volumesnapshotcontents", + reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("fake-delete-error") + }, + }, + }, + err: "error to delete volume snapshot content: fake-delete-error", }, { name: "wait fail", @@ -399,7 +415,7 @@ func TestEnsureDeleteVSC(t *testing.T) { } err := EnsureDeleteVSC(context.Background(), fakeSnapshotClient.SnapshotV1(), test.vscName, time.Millisecond) - if err != nil { + if test.err != "" { assert.EqualError(t, err, test.err) } else { assert.NoError(t, err) @@ -601,7 +617,8 @@ func TestRetainVSC(t *testing.T) { clientObj: 
[]runtime.Object{vscObj}, updated: &snapshotv1api.VolumeSnapshotContent{ ObjectMeta: metav1.ObjectMeta{ - Name: "fake-vsc", + Name: "fake-vsc", + Finalizers: []string{volumeSnapshotContentProtectFinalizer}, }, Spec: snapshotv1api.VolumeSnapshotContentSpec{ DeletionPolicy: snapshotv1api.VolumeSnapshotContentRetain, @@ -634,3 +651,98 @@ func TestRetainVSC(t *testing.T) { }) } } + +func TestRemoveVSCProtect(t *testing.T) { + vscObj := &snapshotv1api.VolumeSnapshotContent{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-vsc", + Finalizers: []string{volumeSnapshotContentProtectFinalizer}, + }, + } + + tests := []struct { + name string + clientObj []runtime.Object + reactors []reactor + vsc string + updated *snapshotv1api.VolumeSnapshotContent + timeout time.Duration + err string + }{ + { + name: "get vsc error", + vsc: "fake-vsc", + err: "error to get VolumeSnapshotContent fake-vsc: volumesnapshotcontents.snapshot.storage.k8s.io \"fake-vsc\" not found", + }, + { + name: "update vsc fail", + vsc: "fake-vsc", + clientObj: []runtime.Object{vscObj}, + reactors: []reactor{ + { + verb: "update", + resource: "volumesnapshotcontents", + reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, errors.New("fake-update-error") + }, + }, + }, + err: "error to update VolumeSnapshotContent fake-vsc: fake-update-error", + }, + { + name: "update vsc timeout", + vsc: "fake-vsc", + clientObj: []runtime.Object{vscObj}, + reactors: []reactor{ + { + verb: "update", + resource: "volumesnapshotcontents", + reactorFunc: func(action clientTesting.Action) (handled bool, ret runtime.Object, err error) { + return true, nil, &apierrors.StatusError{ErrStatus: metav1.Status{ + Reason: metav1.StatusReasonConflict, + }} + }, + }, + }, + timeout: time.Second, + err: "timed out waiting for the condition", + }, + { + name: "succeed", + vsc: "fake-vsc", + clientObj: []runtime.Object{vscObj}, + timeout: time.Second, + updated: &snapshotv1api.VolumeSnapshotContent{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-vsc", + Finalizers: []string{}, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + fakeSnapshotClient := snapshotFake.NewSimpleClientset(test.clientObj...) 
+ + for _, reactor := range test.reactors { + fakeSnapshotClient.Fake.PrependReactor(reactor.verb, reactor.resource, reactor.reactorFunc) + } + + err := RemoveVSCProtect(context.Background(), fakeSnapshotClient.SnapshotV1(), test.vsc, test.timeout) + + if len(test.err) == 0 { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, test.err) + } + + if test.updated != nil { + updated, err := fakeSnapshotClient.SnapshotV1().VolumeSnapshotContents().Get(context.Background(), test.vsc, metav1.GetOptions{}) + assert.NoError(t, err) + + assert.Equal(t, test.updated.Finalizers, updated.Finalizers) + } + }) + } +} diff --git a/pkg/util/encode/encode.go b/pkg/util/encode/encode.go index 88703ad6e..9704c1a2e 100644 --- a/pkg/util/encode/encode.go +++ b/pkg/util/encode/encode.go @@ -25,9 +25,10 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/scheme" + "github.com/vmware-tanzu/velero/pkg/util" ) // Encode converts the provided object to the specified format @@ -56,8 +57,11 @@ func To(obj runtime.Object, format string, w io.Writer) error { // Only objects registered in the velero scheme, or objects with their TypeMeta set will have valid encoders. func EncoderFor(format string, obj runtime.Object) (runtime.Encoder, error) { var encoder runtime.Encoder + + codecFactory := serializer.NewCodecFactory(util.VeleroScheme) + desiredMediaType := fmt.Sprintf("application/%s", format) - serializerInfo, found := runtime.SerializerInfoForMediaType(scheme.Codecs.SupportedMediaTypes(), desiredMediaType) + serializerInfo, found := runtime.SerializerInfoForMediaType(codecFactory.SupportedMediaTypes(), desiredMediaType) if !found { return nil, errors.Errorf("unable to locate an encoder for %q", desiredMediaType) } @@ -69,7 +73,7 @@ func EncoderFor(format string, obj runtime.Object) (runtime.Encoder, error) { if !obj.GetObjectKind().GroupVersionKind().Empty() { return encoder, nil } - encoder = scheme.Codecs.EncoderForVersion(encoder, v1.SchemeGroupVersion) + encoder = codecFactory.EncoderForVersion(encoder, v1.SchemeGroupVersion) return encoder, nil } diff --git a/pkg/util/podvolume/pod_volume.go b/pkg/util/podvolume/pod_volume.go index 542e15297..94e969b3b 100644 --- a/pkg/util/podvolume/pod_volume.go +++ b/pkg/util/podvolume/pod_volume.go @@ -46,7 +46,7 @@ func GetVolumesByPod(pod *corev1api.Pod, defaultVolumesToFsBackup bool) ([]strin if pv.Secret != nil { continue } - // don't backup volumes mounting config maps. Config maps will be backed up separately. + // don't backup volumes mounting ConfigMaps. ConfigMaps will be backed up separately. 
if pv.ConfigMap != nil { continue } diff --git a/pkg/util/podvolume/pod_volume_test.go b/pkg/util/podvolume/pod_volume_test.go index 67ce9bb57..b4898e6b4 100644 --- a/pkg/util/podvolume/pod_volume_test.go +++ b/pkg/util/podvolume/pod_volume_test.go @@ -222,7 +222,7 @@ func TestGetVolumesByPod(t *testing.T) { }, }, { - name: "should exclude volumes mounting config maps", + name: "should exclude volumes mounting ConfigMaps", defaultVolumesToFsBackup: true, pod: &corev1api.Pod{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/util/scheme.go b/pkg/util/scheme.go new file mode 100644 index 000000000..7e5703fac --- /dev/null +++ b/pkg/util/scheme.go @@ -0,0 +1,19 @@ +package util + +import ( + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" +) + +var VeleroScheme = runtime.NewScheme() + +func init() { + localSchemeBuilder := runtime.SchemeBuilder{ + v1.AddToScheme, + v2alpha1.AddToScheme, + } + utilruntime.Must(localSchemeBuilder.AddToScheme(VeleroScheme)) +} diff --git a/pkg/volume/volume_info_common.go b/pkg/volume/volume_info_common.go new file mode 100644 index 000000000..cfca31df9 --- /dev/null +++ b/pkg/volume/volume_info_common.go @@ -0,0 +1,147 @@ +/* +Copyright 2018 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package volume + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +type VolumeBackupMethod string + +const ( + NativeSnapshot VolumeBackupMethod = "NativeSnapshot" + PodVolumeBackup VolumeBackupMethod = "PodVolumeBackup" + CSISnapshot VolumeBackupMethod = "CSISnapshot" +) + +type VolumeInfoVersion struct { + Version string `json:"version"` +} + +type VolumeInfos struct { + VolumeInfos []VolumeInfo `json:"volumeInfos"` +} + +type VolumeInfo struct { + // The PVC's name. + PVCName string `json:"pvcName,omitempty"` + + // The PVC's namespace + PVCNamespace string `json:"pvcNamespace,omitempty"` + + // The PV name. + PVName string `json:"pvName,omitempty"` + + // The way the volume data is backed up. The valid value includes `VeleroNativeSnapshot`, `PodVolumeBackup` and `CSISnapshot`. + BackupMethod VolumeBackupMethod `json:"backupMethod,omitempty"` + + // Whether the volume's snapshot data is moved to specified storage. + SnapshotDataMoved bool `json:"snapshotDataMoved"` + + // Whether the local snapshot is preserved after snapshot is moved. + PreserveLocalSnapshot bool `json:"preserveLocalSnapshot"` + + // Whether the Volume is skipped in this backup. + Skipped bool `json:"skipped"` + + // The reason for the volume is skipped in the backup. + SkippedReason string `json:"skippedReason,omitempty"` + + // Snapshot starts timestamp. + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` + + // The Async Operation's ID. 
+ OperationID string `json:"operationID,omitempty"` + + CSISnapshotInfo CSISnapshotInfo `json:"csiSnapshotInfo,omitempty"` + SnapshotDataMovementInfo SnapshotDataMovementInfo `json:"snapshotDataMovementInfo,omitempty"` + NativeSnapshotInfo NativeSnapshotInfo `json:"nativeSnapshotInfo,omitempty"` + PVBInfo PodVolumeBackupInfo `json:"pvbInfo,omitempty"` +} + +// CSISnapshotInfo is used for displaying the CSI snapshot status +type CSISnapshotInfo struct { + // It's the storage provider's snapshot ID for CSI. + SnapshotHandle string `json:"snapshotHandle"` + + // The snapshot corresponding volume size. Some of the volume backup methods cannot retrieve the data by current design, for example, the Velero native snapshot. + Size int64 `json:"size"` + + // The name of the CSI driver. + Driver string `json:"driver"` + + // The name of the VolumeSnapshotContent. + VSCName string `json:"vscName"` +} + +// SnapshotDataMovementInfo is used for displaying the snapshot data mover status. +type SnapshotDataMovementInfo struct { + // The data mover used by the backup. The valid values are `velero` and ``(equals to `velero`). + DataMover string `json:"dataMover"` + + // The type of the uploader that uploads the snapshot data. The valid values are `kopia` and `restic`. It's useful for file-system backup and snapshot data mover. + UploaderType string `json:"uploaderType"` + + // The name or ID of the snapshot associated object(SAO). + // SAO is used to support local snapshots for the snapshot data mover, + // e.g. it could be a VolumeSnapshot for CSI snapshot data movement. + RetainedSnapshot string `json:"retainedSnapshot"` + + // It's the filesystem repository's snapshot ID. + SnapshotHandle string `json:"snapshotHandle"` +} + +// NativeSnapshotInfo is used for displaying the Velero native snapshot status. +// A Velero Native Snapshot is a cloud storage snapshot taken by the Velero native +// plugins, e.g. velero-plugin-for-aws, velero-plugin-for-gcp, and +// velero-plugin-for-microsoft-azure. +type NativeSnapshotInfo struct { + // It's the storage provider's snapshot ID for the Velero-native snapshot. + SnapshotHandle string `json:"snapshotHandle"` + + // The snapshot corresponding volume size. Some of the volume backup methods cannot retrieve the data by current design, for example, the Velero native snapshot. + Size int64 `json:"size"` + + // The cloud provider snapshot volume type. + VolumeType string `json:"volumeType"` + + // The cloud provider snapshot volume's availability zones. + VolumeAZ string `json:"volumeAZ"` + + // The cloud provider snapshot volume's IOPS. + IOPS string `json:"iops"` +} + +// PodVolumeBackupInfo is used for displaying the PodVolumeBackup snapshot status. +type PodVolumeBackupInfo struct { + // It's the file-system uploader's snapshot ID for PodVolumeBackup. + SnapshotHandle string `json:"snapshotHandle"` + + // The snapshot corresponding volume size. Some of the volume backup methods cannot retrieve the data by current design, for example, the Velero native snapshot. + Size int64 `json:"size"` + + // The type of the uploader that uploads the data. The valid values are `kopia` and `restic`. It's useful for file-system backup and snapshot data mover. + UploaderType string `json:"uploaderType"` + + // The PVC's corresponding volume name used by Pod + // https://github.com/kubernetes/kubernetes/blob/e4b74dd12fa8cb63c174091d5536a10b8ec19d34/pkg/apis/core/types.go#L48 + VolumeName string `json:"volumeName"` + + // The Pod name mounting this PVC. The format should be /. 
+ PodName string `json:"podName"` + + // The PVB-taken k8s node's name. + NodeName string `json:"nodeName"` +} diff --git a/site/content/docs/main/api-types/schedule.md b/site/content/docs/main/api-types/schedule.md index c2c55f6b7..eb8e8fbd8 100644 --- a/site/content/docs/main/api-types/schedule.md +++ b/site/content/docs/main/api-types/schedule.md @@ -41,6 +41,11 @@ spec: # CSI VolumeSnapshot status turns to ReadyToUse during creation, before # returning error as timeout. The default value is 10 minute. csiSnapshotTimeout: 10m + # resourcePolicy specifies the referenced resource policies that backup should follow + # optional + resourcePolicy: + kind: configmap + name: resource-policy-configmap # Array of namespaces to include in the scheduled backup. If unspecified, all namespaces are included. # Optional. includedNamespaces: diff --git a/site/content/docs/main/contributions/tencent-config.md b/site/content/docs/main/contributions/tencent-config.md index 5651ae24e..4ad54aaa8 100644 --- a/site/content/docs/main/contributions/tencent-config.md +++ b/site/content/docs/main/contributions/tencent-config.md @@ -17,7 +17,7 @@ You can deploy Velero on Tencent [TKE](https://cloud.tencent.com/document/produc Create an object bucket for Velero to store backups in the Tencent Cloud COS console. For how to create, please refer to Tencent Cloud COS [Create a bucket](https://cloud.tencent.com/document/product/436/13309) usage instructions. -Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315.E5.8D.95.E4.B8.AA.E6.8E.88.E6.9D.83) Tencent user instructions. +Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315) Tencent user instructions. ## Get bucket access credentials diff --git a/site/content/docs/main/csi-snapshot-data-movement.md b/site/content/docs/main/csi-snapshot-data-movement.md index c9ff42d3e..b6a03ad12 100644 --- a/site/content/docs/main/csi-snapshot-data-movement.md +++ b/site/content/docs/main/csi-snapshot-data-movement.md @@ -248,6 +248,7 @@ that anyone who has access to your backup storage can decrypt your backup data** to the backup storage appropriately. - [Velero built-in data mover] Even though the backup data could be incrementally preserved, for a single file data, Velero built-in data mover leverages on deduplication to find the difference to be saved. This means that large files (such as ones storing a database) will take a long time to scan for data deduplication, even if the actual difference is small. - [Velero built-in data mover] You may need to [customize the resource limits][11] to make sure backups complete successfully for massive small files or large backup size cases, for more details refer to [Velero file system level backup performance guide][12]. +- The block mode is supported by the Kopia uploader, but it only supports non-Windows platforms, because the block mode code invokes some system calls that are not present in the Windows platform. 
## Troubleshooting diff --git a/site/content/docs/main/file-system-backup.md b/site/content/docs/main/file-system-backup.md index e5be507e3..0580b55e9 100644 --- a/site/content/docs/main/file-system-backup.md +++ b/site/content/docs/main/file-system-backup.md @@ -63,7 +63,7 @@ repository on AWS S3, the full backup repo path for namespace1 would be `https:/ for namespace2 would be `https://s3-us-west-2.amazonaws.com/bucket/kopia/ns2`. There may be additional installation steps depending on the cloud provider plugin you are using. You should refer to the -[plugin specific documentation](supported-providers.md) for the must up to date information. +[plugin specific documentation](supported-providers.md) for the most up to date information. **Note:** Currently, Velero creates a secret named `velero-repo-credentials` in the velero install namespace, containing a default backup repository password. You can update the secret with your own password encoded as base64 prior to the first backup (i.e., FS Backup, data mover) targeting to the backup repository. The value of the key to update is @@ -185,7 +185,7 @@ The following sections provide more details on the two approaches. In this approach, Velero will back up all pod volumes using FSB with the exception of: -- Volumes mounting the default service account token, Kubernetes secrets, and config maps +- Volumes mounting the default service account token, Kubernetes Secrets, and ConfigMaps - Hostpath volumes It is possible to exclude volumes from being backed up using the `backup.velero.io/backup-volumes-excludes` diff --git a/site/content/docs/main/restore-reference.md b/site/content/docs/main/restore-reference.md index d50a12a43..2f5a18912 100644 --- a/site/content/docs/main/restore-reference.md +++ b/site/content/docs/main/restore-reference.md @@ -280,21 +280,23 @@ There are two ways to delete a Restore object: 1. Deleting with `velero restore delete` will delete the Custom Resource representing the restore, along with its individual log and results files. It will not delete any objects that were created by the restore in your cluster. 2. Deleting with `kubectl -n velero delete restore` will delete the Custom Resource representing the restore. It will not delete restore log or results files from object storage, or any objects that were created during the restore in your cluster. -## What happens to NodePorts when restoring Services +## What happens to NodePorts and HealthCheckNodePort when restoring Services -During a restore, Velero deletes **Auto assigned** NodePorts by default and Services get new **auto assigned** nodePorts after restore. +During a restore, Velero deletes **Auto assigned** NodePorts and HealthCheckNodePort by default and Services get new **auto assigned** nodePorts and healthCheckNodePort after restore. -Velero auto detects **explicitly specified** NodePorts using **`last-applied-config`** annotation and they are **preserved** after restore. NodePorts can be explicitly specified as `.spec.ports[*].nodePort` field on Service definition. +Velero auto detects **explicitly specified** NodePorts using **`last-applied-config`** annotation and **`managedFields`**. They are **preserved** after restore. NodePorts can be explicitly specified as `.spec.ports[*].nodePort` field on Service definition. -### Always Preserve NodePorts +Velero will do the same to the `HealthCheckNodePort` as `NodePorts`. -It is not always possible to set nodePorts explicitly on some big clusters because of operational complexity. 
As the Kubernetes [NodePort documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) states, "if you want a specific port number, you can specify a value in the `nodePort` field. The control plane will either allocate you that port or report that the API transaction failed. This means that you need to take care of possible port collisions yourself. You also have to use a valid port number, one that's inside the range configured for NodePort use."" +### Always Preserve NodePorts and HealthCheckNodePort + +It is not always possible to set nodePorts and healthCheckNodePort explicitly on some big clusters because of operational complexity. As the Kubernetes [NodePort documentation](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) states, "if you want a specific port number, you can specify a value in the `nodePort` field. The control plane will either allocate you that port or report that the API transaction failed. This means that you need to take care of possible port collisions yourself. You also have to use a valid port number, one that's inside the range configured for NodePort use."" The clusters which are not explicitly specifying nodePorts may still need to restore original NodePorts in the event of a disaster. Auto assigned nodePorts are typically defined on Load Balancers located in front of cluster. Changing all these nodePorts on Load Balancers is another operation complexity you are responsible for updating after disaster if nodePorts are changed. -Use the `velero restore create ` command's `--preserve-nodeports` flag to preserve Service nodePorts always, regardless of whether nodePorts are explicitly specified or not. This flag is used for preserving the original nodePorts from a backup and can be used as `--preserve-nodeports` or `--preserve-nodeports=true`. If this flag is present, Velero will not remove the nodePorts when restoring a Service, but will try to use the nodePorts from the backup. +Use the `velero restore create ` command's `--preserve-nodeports` flag to preserve Service nodePorts and healthCheckNodePort always, regardless of whether nodePorts are explicitly specified or not. This flag is used for preserving the original nodePorts and healthCheckNodePort from a backup and can be used as `--preserve-nodeports` or `--preserve-nodeports=true`. If this flag is present, Velero will not remove the nodePorts and healthCheckNodePort when restoring a Service, but will try to use the nodePorts from the backup. -Trying to preserve nodePorts may cause port conflicts when restoring on situations below: +Trying to preserve nodePorts and healthCheckNodePort may cause port conflicts when restoring on situations below: - If the nodePort from the backup is already allocated on the target cluster then Velero prints error log as shown below and continues the restore operation. diff --git a/site/content/docs/main/restore-resource-modifiers.md b/site/content/docs/main/restore-resource-modifiers.md index 0d59fad5a..0c1f2f217 100644 --- a/site/content/docs/main/restore-resource-modifiers.md +++ b/site/content/docs/main/restore-resource-modifiers.md @@ -105,3 +105,83 @@ resourceModifierRules: - Update a container's image using a json patch with positional arrays kubectl patch pod valid-pod -type='json' -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"new image"}]' - Before creating the resource modifier yaml, you can try it out using kubectl patch command. The same commands should work as it is. 
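For intuition about what the merge-patch rules in the sections below actually do to a resource, here is a minimal Go sketch. It uses the `github.com/evanphx/json-patch` library (the same library this change uses elsewhere for VolumeSnapshotContent patching) purely for illustration; it is not Velero's resource-modifier code path, and the trimmed-down Pod document is made up for the example.

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// A trimmed-down Pod document with two annotations.
	original := []byte(`{"metadata":{"name":"my-pod","annotations":{"foo":"bar","keep":"yes"}}}`)

	// The merge patch from the example below: setting "foo" to null removes it.
	patch := []byte(`{"metadata":{"annotations":{"foo":null}}}`)

	patched, err := jsonpatch.MergePatch(original, patch)
	if err != nil {
		panic(err)
	}

	// Prints the document without the "foo" annotation; "keep" is untouched.
	fmt.Println(string(patched))
}
```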
+ +#### JSON Merge Patch +You can modify a resource using JSON Merge Patch +```yaml +version: v1 +resourceModifierRules: +- conditions: + groupResource: pods + namespaces: + - ns1 + mergePatches: + - patchData: | + { + "metadata": { + "annotations": { + "foo": null + } + } + } +``` +- The above configmap will apply the Merge Patch to all the pods in namespace ns1 and remove the annotation `foo` from the pods. +- Both json and yaml format are supported for the patchData. +- For more details, please refer to [this doc](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/) + +#### Strategic Merge Patch +You can modify a resource using Strategic Merge Patch +```yaml +version: v1 +resourceModifierRules: +- conditions: + groupResource: pods + resourceNameRegex: "^my-pod$" + namespaces: + - ns1 + strategicPatches: + - patchData: | + { + "spec": { + "containers": [ + { + "name": "nginx", + "image": "repo2/nginx" + } + ] + } + } +``` +- The above configmap will apply the Strategic Merge Patch to the pod with name my-pod in namespace ns1 and update the image of container nginx to `repo2/nginx`. +- Both json and yaml format are supported for the patchData. +- For more details, please refer to [this doc](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/) + + +### Conditional Patches in ALL Patch Types +A new field `matches` is added in conditions to support conditional patches. + +Example of matches in conditions +```yaml +version: v1 +resourceModifierRules: +- conditions: + groupResource: persistentvolumeclaims.storage.k8s.io + matches: + - path: "/spec/storageClassName" + value: "premium" + mergePatches: + - patchData: | + { + "metadata": { + "annotations": { + "foo": null + } + } + } +``` +- The above configmap will apply the Merge Patch to all the PVCs in all namespaces with storageClassName premium and remove the annotation `foo` from the PVCs. +- You can specify multiple rules in the `matches` list. The patch will be applied only if all the matches are satisfied. + +### Wildcard Support for GroupResource +The user can specify a wildcard for groupResource in the conditions' struct. This will allow the user to apply the patches for all the resources of a particular group or all resources in all groups. For example, `*.apps` will apply to all the resources in the `apps` group, `*` will apply to all the resources in core group, `*.*` will apply to all the resources in all groups. +- If both `*.groupName` and `namespaces` are specified, the patches will be applied to all the namespaced resources in this group in the specified namespaces and all the cluster resources in this group. \ No newline at end of file diff --git a/site/content/docs/v1.10/contributions/tencent-config.md b/site/content/docs/v1.10/contributions/tencent-config.md index 592808c2d..cc5517360 100644 --- a/site/content/docs/v1.10/contributions/tencent-config.md +++ b/site/content/docs/v1.10/contributions/tencent-config.md @@ -17,7 +17,7 @@ You can deploy Velero on Tencent [TKE](https://cloud.tencent.com/document/produc Create an object bucket for Velero to store backups in the Tencent Cloud COS console. For how to create, please refer to Tencent Cloud COS [Create a bucket](https://cloud.tencent.com/document/product/436/13309) usage instructions. -Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. 
For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315.E5.8D.95.E4.B8.AA.E6.8E.88.E6.9D.83) Tencent user instructions. +Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315) Tencent user instructions. ## Get bucket access credentials diff --git a/site/content/docs/v1.10/file-system-backup.md b/site/content/docs/v1.10/file-system-backup.md index b9549ae7a..0ba10c618 100644 --- a/site/content/docs/v1.10/file-system-backup.md +++ b/site/content/docs/v1.10/file-system-backup.md @@ -186,7 +186,7 @@ The following sections provide more details on the two approaches. In this approach, Velero will back up all pod volumes using FSB with the exception of: -- Volumes mounting the default service account token, Kubernetes secrets, and config maps +- Volumes mounting the default service account token, Kubernetes Secrets, and ConfigMaps - Hostpath volumes It is possible to exclude volumes from being backed up using the `backup.velero.io/backup-volumes-excludes` diff --git a/site/content/docs/v1.11/contributions/tencent-config.md b/site/content/docs/v1.11/contributions/tencent-config.md index 5651ae24e..4ad54aaa8 100644 --- a/site/content/docs/v1.11/contributions/tencent-config.md +++ b/site/content/docs/v1.11/contributions/tencent-config.md @@ -17,7 +17,7 @@ You can deploy Velero on Tencent [TKE](https://cloud.tencent.com/document/produc Create an object bucket for Velero to store backups in the Tencent Cloud COS console. For how to create, please refer to Tencent Cloud COS [Create a bucket](https://cloud.tencent.com/document/product/436/13309) usage instructions. -Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315.E5.8D.95.E4.B8.AA.E6.8E.88.E6.9D.83) Tencent user instructions. +Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315) Tencent user instructions. ## Get bucket access credentials diff --git a/site/content/docs/v1.11/file-system-backup.md b/site/content/docs/v1.11/file-system-backup.md index 2502ae569..4104bebeb 100644 --- a/site/content/docs/v1.11/file-system-backup.md +++ b/site/content/docs/v1.11/file-system-backup.md @@ -186,7 +186,7 @@ The following sections provide more details on the two approaches. 
In this approach, Velero will back up all pod volumes using FSB with the exception of: -- Volumes mounting the default service account token, Kubernetes secrets, and config maps +- Volumes mounting the default service account token, Kubernetes Secrets, and ConfigMaps - Hostpath volumes It is possible to exclude volumes from being backed up using the `backup.velero.io/backup-volumes-excludes` diff --git a/site/content/docs/v1.12/api-types/schedule.md b/site/content/docs/v1.12/api-types/schedule.md index c2c55f6b7..eb8e8fbd8 100644 --- a/site/content/docs/v1.12/api-types/schedule.md +++ b/site/content/docs/v1.12/api-types/schedule.md @@ -41,6 +41,11 @@ spec: # CSI VolumeSnapshot status turns to ReadyToUse during creation, before # returning error as timeout. The default value is 10 minute. csiSnapshotTimeout: 10m + # resourcePolicy specifies the referenced resource policies that backup should follow + # optional + resourcePolicy: + kind: configmap + name: resource-policy-configmap # Array of namespaces to include in the scheduled backup. If unspecified, all namespaces are included. # Optional. includedNamespaces: diff --git a/site/content/docs/v1.12/contributions/tencent-config.md b/site/content/docs/v1.12/contributions/tencent-config.md index 5651ae24e..4ad54aaa8 100644 --- a/site/content/docs/v1.12/contributions/tencent-config.md +++ b/site/content/docs/v1.12/contributions/tencent-config.md @@ -17,7 +17,7 @@ You can deploy Velero on Tencent [TKE](https://cloud.tencent.com/document/produc Create an object bucket for Velero to store backups in the Tencent Cloud COS console. For how to create, please refer to Tencent Cloud COS [Create a bucket](https://cloud.tencent.com/document/product/436/13309) usage instructions. -Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315.E5.8D.95.E4.B8.AA.E6.8E.88.E6.9D.83) Tencent user instructions. +Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315) Tencent user instructions. ## Get bucket access credentials diff --git a/site/content/docs/v1.12/csi-snapshot-data-movement.md b/site/content/docs/v1.12/csi-snapshot-data-movement.md index 6705a3145..a50d41aba 100644 --- a/site/content/docs/v1.12/csi-snapshot-data-movement.md +++ b/site/content/docs/v1.12/csi-snapshot-data-movement.md @@ -261,6 +261,7 @@ that anyone who has access to your backup storage can decrypt your backup data** to the backup storage appropriately. - [Velero built-in data mover] Even though the backup data could be incrementally preserved, for a single file data, Velero built-in data mover leverages on deduplication to find the difference to be saved. This means that large files (such as ones storing a database) will take a long time to scan for data deduplication, even if the actual difference is small. - [Velero built-in data mover] You may need to [customize the resource limits][11] to make sure backups complete successfully for massive small files or large backup size cases, for more details refer to [Velero file system level backup performance guide][12]. 
+- The block mode is supported by the Kopia uploader, but it only supports non-Windows platforms, because the block mode code invokes some system calls that are not present in the Windows platform. ## Troubleshooting diff --git a/site/content/docs/v1.12/custom-plugins.md b/site/content/docs/v1.12/custom-plugins.md index bac500db5..351d8ff30 100644 --- a/site/content/docs/v1.12/custom-plugins.md +++ b/site/content/docs/v1.12/custom-plugins.md @@ -37,7 +37,7 @@ When naming your plugin, keep in mind that the full name needs to conform to the - have two parts, prefix + name, separated by '/' - none of the above parts can be empty - the prefix is a valid DNS subdomain name -- a plugin with the same prefix + name cannot not already exist +- a plugin with the same prefix + name cannot already exist ### Some examples: diff --git a/site/content/docs/v1.12/file-system-backup.md b/site/content/docs/v1.12/file-system-backup.md index ab87a3bef..6799a4006 100644 --- a/site/content/docs/v1.12/file-system-backup.md +++ b/site/content/docs/v1.12/file-system-backup.md @@ -199,7 +199,7 @@ The following sections provide more details on the two approaches. In this approach, Velero will back up all pod volumes using FSB with the exception of: -- Volumes mounting the default service account token, Kubernetes secrets, and config maps +- Volumes mounting the default service account token, Kubernetes Secrets, and ConfigMaps - Hostpath volumes It is possible to exclude volumes from being backed up using the `backup.velero.io/backup-volumes-excludes` diff --git a/site/content/docs/v1.5/contributions/tencent-config.md b/site/content/docs/v1.5/contributions/tencent-config.md index b50b08556..630b3dd67 100644 --- a/site/content/docs/v1.5/contributions/tencent-config.md +++ b/site/content/docs/v1.5/contributions/tencent-config.md @@ -17,7 +17,7 @@ You can deploy Velero on Tencent [TKE](https://cloud.tencent.com/document/produc Create an object bucket for Velero to store backups in the Tencent Cloud COS console. For how to create, please refer to Tencent Cloud COS [Create a bucket](https://cloud.tencent.com/document/product/436/13309) usage instructions. -Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315.E5.8D.95.E4.B8.AA.E6.8E.88.E6.9D.83) Tencent user instructions. +Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315) Tencent user instructions. ## Get bucket access credentials diff --git a/site/content/docs/v1.6/contributions/tencent-config.md b/site/content/docs/v1.6/contributions/tencent-config.md index 50edfce49..ca7eae7ac 100644 --- a/site/content/docs/v1.6/contributions/tencent-config.md +++ b/site/content/docs/v1.6/contributions/tencent-config.md @@ -17,7 +17,7 @@ You can deploy Velero on Tencent [TKE](https://cloud.tencent.com/document/produc Create an object bucket for Velero to store backups in the Tencent Cloud COS console. For how to create, please refer to Tencent Cloud COS [Create a bucket](https://cloud.tencent.com/document/product/436/13309) usage instructions. 
-Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315.E5.8D.95.E4.B8.AA.E6.8E.88.E6.9D.83) Tencent user instructions. +Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315) Tencent user instructions. ## Get bucket access credentials diff --git a/site/content/docs/v1.7/contributions/tencent-config.md b/site/content/docs/v1.7/contributions/tencent-config.md index 50edfce49..ca7eae7ac 100644 --- a/site/content/docs/v1.7/contributions/tencent-config.md +++ b/site/content/docs/v1.7/contributions/tencent-config.md @@ -17,7 +17,7 @@ You can deploy Velero on Tencent [TKE](https://cloud.tencent.com/document/produc Create an object bucket for Velero to store backups in the Tencent Cloud COS console. For how to create, please refer to Tencent Cloud COS [Create a bucket](https://cloud.tencent.com/document/product/436/13309) usage instructions. -Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315.E5.8D.95.E4.B8.AA.E6.8E.88.E6.9D.83) Tencent user instructions. +Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315) Tencent user instructions. ## Get bucket access credentials diff --git a/site/content/docs/v1.7/restic.md b/site/content/docs/v1.7/restic.md index 8873c1ff5..6fa1543db 100644 --- a/site/content/docs/v1.7/restic.md +++ b/site/content/docs/v1.7/restic.md @@ -183,7 +183,7 @@ The following sections provide more details on the two approaches. In this approach, Velero will back up all pod volumes using restic with the exception of: -- Volumes mounting the default service account token, kubernetes secrets, and config maps +- Volumes mounting the default service account token, Kubernetes Secrets, and ConfigMaps - Hostpath volumes It is possible to exclude volumes from being backed up using the `backup.velero.io/backup-volumes-excludes` annotation on the pod. diff --git a/site/content/docs/v1.8/contributions/tencent-config.md b/site/content/docs/v1.8/contributions/tencent-config.md index 50edfce49..ca7eae7ac 100644 --- a/site/content/docs/v1.8/contributions/tencent-config.md +++ b/site/content/docs/v1.8/contributions/tencent-config.md @@ -17,7 +17,7 @@ You can deploy Velero on Tencent [TKE](https://cloud.tencent.com/document/produc Create an object bucket for Velero to store backups in the Tencent Cloud COS console. For how to create, please refer to Tencent Cloud COS [Create a bucket](https://cloud.tencent.com/document/product/436/13309) usage instructions. -Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. 
For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315.E5.8D.95.E4.B8.AA.E6.8E.88.E6.9D.83) Tencent user instructions. +Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315) Tencent user instructions. ## Get bucket access credentials diff --git a/site/content/docs/v1.9/contributions/tencent-config.md b/site/content/docs/v1.9/contributions/tencent-config.md index 11b0762c0..223c3bd31 100644 --- a/site/content/docs/v1.9/contributions/tencent-config.md +++ b/site/content/docs/v1.9/contributions/tencent-config.md @@ -17,7 +17,7 @@ You can deploy Velero on Tencent [TKE](https://cloud.tencent.com/document/produc Create an object bucket for Velero to store backups in the Tencent Cloud COS console. For how to create, please refer to Tencent Cloud COS [Create a bucket](https://cloud.tencent.com/document/product/436/13309) usage instructions. -Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315.E5.8D.95.E4.B8.AA.E6.8E.88.E6.9D.83) Tencent user instructions. +Set access to the bucket through the object storage console, the bucket needs to be **read** and **written**, so the account is granted data reading, data writing permissions. For how to configure, see the [permission access settings](https://cloud.tencent.com/document/product/436/13315) Tencent user instructions. ## Get bucket access credentials diff --git a/site/content/docs/v1.9/restic.md b/site/content/docs/v1.9/restic.md index 492738af1..c1cd8aeb3 100644 --- a/site/content/docs/v1.9/restic.md +++ b/site/content/docs/v1.9/restic.md @@ -186,7 +186,7 @@ The following sections provide more details on the two approaches. In this approach, Velero will back up all pod volumes using Restic with the exception of: -- Volumes mounting the default service account token, Kubernetes secrets, and config maps +- Volumes mounting the default service account token, Kubernetes Secrets, and ConfigMaps - Hostpath volumes It is possible to exclude volumes from being backed up using the `backup.velero.io/backup-volumes-excludes` annotation on the pod. diff --git a/site/content/posts/2020-09-16-Velero-1.5-For-And-By-Community.md b/site/content/posts/2020-09-16-Velero-1.5-For-And-By-Community.md index 0d6938ebf..aa19bd536 100644 --- a/site/content/posts/2020-09-16-Velero-1.5-For-And-By-Community.md +++ b/site/content/posts/2020-09-16-Velero-1.5-For-And-By-Community.md @@ -33,7 +33,7 @@ With the release of 1.5, Velero now has the ability to backup all pod volumes us 1. Volumes mounting the default service account token 1. Hostpath volumes -1. Volumes mounting Kubernetes secrets and config maps. +1. Volumes mounting Kubernetes Secrets and ConfigMaps. You can enable this feature on a per backup basis or as a default setting for all Velero backups. Read more about this feature on our [restic integration](https://velero.io/docs/v1.5/restic/) page on our documentation website. 
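The opt-out exclusions described above (the default service account token, hostPath volumes, Kubernetes Secrets, and ConfigMaps) amount to a simple filter over a Pod's volume list. The sketch below is a simplified illustration of that filter, not the exact `GetVolumesByPod` implementation touched by this change; in particular, the `default-token-` prefix check for the service account token volume is an assumption made for brevity.

```go
package podvolume

import (
	"strings"

	corev1api "k8s.io/api/core/v1"
)

// volumesToBackUp returns the names of the pod volumes that the opt-out
// approach would hand to file-system backup, skipping the volume types the
// docs above call out. Simplified illustration only.
func volumesToBackUp(pod *corev1api.Pod) []string {
	var names []string
	for _, v := range pod.Spec.Volumes {
		switch {
		// Skip the auto-mounted default service account token volume
		// (prefix check is an assumption for this sketch).
		case strings.HasPrefix(v.Name, "default-token-"):
			continue
		// Skip volumes mounting Kubernetes Secrets.
		case v.Secret != nil:
			continue
		// Skip volumes mounting ConfigMaps; ConfigMaps are backed up as resources.
		case v.ConfigMap != nil:
			continue
		// Skip hostPath volumes.
		case v.HostPath != nil:
			continue
		}
		names = append(names, v.Name)
	}
	return names
}
```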
diff --git a/test/pkg/client/factory.go b/test/pkg/client/factory.go index 3c0911177..340cba587 100644 --- a/test/pkg/client/factory.go +++ b/test/pkg/client/factory.go @@ -34,16 +34,12 @@ import ( velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" velerov2alpha1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v2alpha1" - clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" ) // Factory knows how to create a VeleroClient and Kubernetes client. type Factory interface { // BindFlags binds common flags (--kubeconfig, --namespace) to the passed-in FlagSet. BindFlags(flags *pflag.FlagSet) - // Client returns a VeleroClient. It uses the following priority to specify the cluster - // configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration. - Client() (clientset.Interface, error) // KubeClient returns a Kubernetes client. It uses the following priority to specify the cluster // configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration. KubeClient() (kubernetes.Interface, error) @@ -114,19 +110,6 @@ func (f *factory) ClientConfig() (*rest.Config, error) { return Config(f.kubeconfig, f.kubecontext, f.baseName, f.clientQPS, f.clientBurst) } -func (f *factory) Client() (clientset.Interface, error) { - clientConfig, err := f.ClientConfig() - if err != nil { - return nil, err - } - - veleroClient, err := clientset.NewForConfig(clientConfig) - if err != nil { - return nil, errors.WithStack(err) - } - return veleroClient, nil -} - func (f *factory) KubeClient() (kubernetes.Interface, error) { clientConfig, err := f.ClientConfig() if err != nil { diff --git a/test/util/providers/aws_utils.go b/test/util/providers/aws_utils.go index f9dd597ac..9f70a6708 100644 --- a/test/util/providers/aws_utils.go +++ b/test/util/providers/aws_utils.go @@ -17,19 +17,24 @@ limitations under the License. 
package providers import ( + "crypto/tls" + "crypto/x509" "fmt" + "net/http" "net/url" "os" - "strconv" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/aws/aws-sdk-go-v2/config" + s3types "github.com/aws/aws-sdk-go-v2/service/s3/types" + + "github.com/aws/aws-sdk-go-v2/aws" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + s3manager "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + + "github.com/aws/aws-sdk-go-v2/service/ec2" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/pkg/errors" "golang.org/x/net/context" @@ -55,10 +60,26 @@ const ( enableSharedConfigKey = "enableSharedConfig" ) -func newSessionOptions(config aws.Config, profile, caCert, credentialsFile, enableSharedConfig string) (session.Options, error) { - sessionOptions := session.Options{Config: config, Profile: profile} - if caCert != "" { - sessionOptions.CustomCABundle = strings.NewReader(caCert) +func newAWSConfig(region, profile, credentialsFile string, insecureSkipTLSVerify bool, caCert string) (aws.Config, error) { + empty := aws.Config{} + client := awshttp.NewBuildableClient().WithTransportOptions(func(tr *http.Transport) { + if len(caCert) > 0 { + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM([]byte(caCert)) + if tr.TLSClientConfig == nil { + tr.TLSClientConfig = &tls.Config{ + RootCAs: caCertPool, + } + } else { + tr.TLSClientConfig.RootCAs = caCertPool + } + } + tr.TLSClientConfig.InsecureSkipVerify = insecureSkipTLSVerify + }) + opts := []func(*config.LoadOptions) error{ + config.WithRegion(region), + config.WithSharedConfigProfile(profile), + config.WithHTTPClient(client), } if credentialsFile == "" && os.Getenv("AWS_SHARED_CREDENTIALS_FILE") != "" { @@ -68,104 +89,60 @@ func newSessionOptions(config aws.Config, profile, caCert, credentialsFile, enab if credentialsFile != "" { if _, err := os.Stat(credentialsFile); err != nil { if os.IsNotExist(err) { - return session.Options{}, errors.Wrapf(err, "provided credentialsFile does not exist") + return empty, errors.Wrapf(err, "provided credentialsFile does not exist") } - return session.Options{}, errors.Wrapf(err, "could not get credentialsFile info") + return empty, errors.Wrapf(err, "could not get credentialsFile info") } - sessionOptions.SharedConfigFiles = append(sessionOptions.SharedConfigFiles, credentialsFile) - sessionOptions.SharedConfigState = session.SharedConfigEnable + opts = append(opts, config.WithSharedCredentialsFiles([]string{credentialsFile}), + config.WithSharedConfigFiles([]string{credentialsFile})) } - return sessionOptions, nil + awsConfig, err := config.LoadDefaultConfig(context.Background(), opts...) 
+ if err != nil { + return empty, errors.Wrapf(err, "could not load config") + } + if _, err := awsConfig.Credentials.Retrieve(context.Background()); err != nil { + return empty, errors.WithStack(err) + } + + return awsConfig, nil } -// takes AWS session options to create a new session -func getSession(options session.Options) (*session.Session, error) { - sess, err := session.NewSessionWithOptions(options) - if err != nil { - return nil, errors.WithStack(err) +func newS3Client(cfg aws.Config, url string, forcePathStyle bool) (*s3.Client, error) { + opts := []func(*s3.Options){ + func(o *s3.Options) { + o.UsePathStyle = forcePathStyle + }, + } + if url != "" { + if !IsValidS3URLScheme(url) { + return nil, errors.Errorf("Invalid s3 url %s, URL must be valid according to https://golang.org/pkg/net/url/#Parse and start with http:// or https://", url) + } + opts = append(opts, func(o *s3.Options) { + o.BaseEndpoint = aws.String(url) + }) } - if _, err := sess.Config.Credentials.Get(); err != nil { - return nil, errors.WithStack(err) - } - - return sess, nil + return s3.NewFromConfig(cfg, opts...), nil } // GetBucketRegion returns the AWS region that a bucket is in, or an error // if the region cannot be determined. func GetBucketRegion(bucket string) (string, error) { - var region string - session, err := session.NewSession() + cfg, err := config.LoadDefaultConfig(context.Background()) if err != nil { return "", errors.WithStack(err) } - - for _, partition := range endpoints.DefaultPartitions() { - for regionHint := range partition.Regions() { - region, _ = s3manager.GetBucketRegion(context.Background(), session, bucket, regionHint) - - // we only need to try a single region hint per partition, so break after the first - break - } - - if region != "" { - return region, nil - } - } - - return "", errors.New("unable to determine bucket's region") -} - -func (s AWSStorage) CreateSession(credentialProfile, credentialsFile, enableSharedConfig, caCert, bucket, bslPrefix, bslConfig string) (*session.Session, error) { - var err error - config := flag.NewMap() - config.Set(bslConfig) - region := config.Data()["region"] - objectsInput := s3.ListObjectsV2Input{} - objectsInput.Bucket = aws.String(bucket) - objectsInput.Delimiter = aws.String("/") - s3URL := "" - s3ForcePathStyleVal := "" - s3ForcePathStyle := false - - if s3ForcePathStyleVal != "" { - if s3ForcePathStyle, err = strconv.ParseBool(s3ForcePathStyleVal); err != nil { - return nil, errors.Wrapf(err, "could not parse %s (expected bool)", s3ForcePathStyleKey) - } - } - - // AWS (not an alternate S3-compatible API) and region not - // explicitly specified: determine the bucket's region - if s3URL == "" && region == "" { - var err error - - region, err = GetBucketRegion(bucket) - if err != nil { - return nil, err - } - } - - serverConfig, err := newAWSConfig(s3URL, region, s3ForcePathStyle) + client := s3.NewFromConfig(cfg) + region, err := s3manager.GetBucketRegion(context.Background(), client, bucket) if err != nil { - return nil, err + return "", errors.WithStack(err) } - - sessionOptions, err := newSessionOptions(*serverConfig, credentialProfile, caCert, credentialsFile, enableSharedConfig) - if err != nil { - return nil, err + if region == "" { + return "", errors.New("unable to determine bucket's region") } - - serverSession, err := getSession(sessionOptions) - - if err != nil { - fmt.Println(err) - return nil, err - } - - return serverSession, nil + return region, nil } // IsValidS3URLScheme returns true if the scheme is http:// or 
https:// @@ -180,34 +157,9 @@ func IsValidS3URLScheme(s3URL string) bool { } return true } -func newAWSConfig(url, region string, forcePathStyle bool) (*aws.Config, error) { - awsConfig := aws.NewConfig(). - WithRegion(region). - WithS3ForcePathStyle(forcePathStyle) - if url != "" { - if !IsValidS3URLScheme(url) { - return nil, errors.Errorf("Invalid s3 url %s, URL must be valid according to https://golang.org/pkg/net/url/#Parse and start with http:// or https://", url) - } - - awsConfig = awsConfig.WithEndpointResolver( - endpoints.ResolverFunc(func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { - if service == s3.EndpointsID { - return endpoints.ResolvedEndpoint{ - URL: url, - }, nil - } - - return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) - }), - ) - } - - return awsConfig, nil -} - -func (s AWSStorage) ListItems(client *s3.S3, objectsV2Input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) { - res, err := client.ListObjectsV2(objectsV2Input) +func (s AWSStorage) ListItems(client *s3.Client, objectsV2Input *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) { + res, err := client.ListObjectsV2(context.Background(), objectsV2Input) if err != nil { return nil, err } @@ -215,14 +167,15 @@ func (s AWSStorage) ListItems(client *s3.S3, objectsV2Input *s3.ListObjectsV2Inp return res, nil } -func (s AWSStorage) DeleteItem(client *s3.S3, deleteObjectV2Input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) { - res, err := client.DeleteObject(deleteObjectV2Input) +func (s AWSStorage) DeleteItem(client *s3.Client, deleteObjectV2Input *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) { + res, err := client.DeleteObject(context.Background(), deleteObjectV2Input) if err != nil { return nil, err } fmt.Println(res) return res, nil } + func (s AWSStorage) IsObjectsInBucket(cloudCredentialsFile, bslBucket, bslPrefix, bslConfig, backupObject string) (bool, error) { config := flag.NewMap() config.Set(bslConfig) @@ -235,38 +188,42 @@ func (s AWSStorage) IsObjectsInBucket(cloudCredentialsFile, bslBucket, bslPrefix } var err error - var s3Config *aws.Config - var sess *session.Session + var s3Config aws.Config + var s3Client *s3.Client region := config.Data()["region"] s3url := "" - + if region == "" { + region, err = GetBucketRegion(bslBucket) + if err != nil { + return false, errors.Wrapf(err, "failed to get region for bucket %s", bslBucket) + } + } if region == "minio" { s3url = config.Data()["s3Url"] - s3Config = &aws.Config{ - Credentials: credentials.NewSharedCredentials(cloudCredentialsFile, ""), - Endpoint: aws.String(s3url), - Region: aws.String(region), - DisableSSL: aws.Bool(true), - S3ForcePathStyle: aws.Bool(true), + s3Config, err = newAWSConfig(region, "", cloudCredentialsFile, true, "") + if err != nil { + return false, errors.Wrapf(err, "Failed to create AWS config of region %s", region) } - sess, err = session.NewSession(s3Config) + s3Client, err = newS3Client(s3Config, s3url, true) } else { - sess, err = s.CreateSession("", cloudCredentialsFile, "false", "", "", "", bslConfig) + s3Config, err = newAWSConfig(region, "", cloudCredentialsFile, false, "") + if err != nil { + return false, errors.Wrapf(err, "Failed to create AWS config of region %s", region) + } + s3Client, err = newS3Client(s3Config, s3url, true) } - if err != nil { - return false, errors.Wrapf(err, fmt.Sprintf("Failed to create AWS session of region %s", region)) + return false, errors.Wrapf(err, "failed to create S3 client of 
region %s", region) } - svc := s3.New(sess) - bucketObjects, err := s.ListItems(svc, &objectsInput) + bucketObjects, err := s.ListItems(s3Client, &objectsInput) if err != nil { fmt.Println("Couldn't retrieve bucket items!") return false, errors.Wrapf(err, "Couldn't retrieve bucket items") } for _, item := range bucketObjects.Contents { - fmt.Println(*item) + fmt.Println(item) } var backupNameInStorage string for _, item := range bucketObjects.CommonPrefixes { @@ -288,50 +245,64 @@ func (s AWSStorage) DeleteObjectsInBucket(cloudCredentialsFile, bslBucket, bslPr config.Set(bslConfig) var err error - var sess *session.Session + var s3Config aws.Config + var s3Client *s3.Client region := config.Data()["region"] s3url := "" - - s3Config := &aws.Config{ - Region: aws.String(region), - Credentials: credentials.NewSharedCredentials(cloudCredentialsFile, ""), + if region == "" { + region, err = GetBucketRegion(bslBucket) + if err != nil { + return errors.Wrapf(err, "failed to get region for bucket %s", bslBucket) + } } if region == "minio" { s3url = config.Data()["s3Url"] - s3Config = &aws.Config{ - Credentials: credentials.NewSharedCredentials(cloudCredentialsFile, ""), - Endpoint: aws.String(s3url), - Region: aws.String(region), - DisableSSL: aws.Bool(true), - S3ForcePathStyle: aws.Bool(true), + s3Config, err = newAWSConfig(region, "", cloudCredentialsFile, true, "") + if err != nil { + return errors.Wrapf(err, "Failed to create AWS config of region %s", region) } - sess, err = session.NewSession(s3Config) + s3Client, err = newS3Client(s3Config, s3url, true) } else { - sess, err = s.CreateSession("", cloudCredentialsFile, "false", "", "", "", bslConfig) + s3Config, err = newAWSConfig(region, "", cloudCredentialsFile, false, "") + if err != nil { + return errors.Wrapf(err, "Failed to create AWS config of region %s", region) + } + s3Client, err = newS3Client(s3Config, s3url, false) } if err != nil { - return errors.Wrapf(err, fmt.Sprintf("Failed to create AWS session of region %s", region)) + return errors.Wrapf(err, "Failed to create S3 client of region %s", region) } - - svc := s3.New(sess) - fullPrefix := strings.Trim(bslPrefix, "/") + "/" + strings.Trim(backupObject, "/") + "/" - iter := s3manager.NewDeleteListIterator(svc, &s3.ListObjectsInput{ + listInput := &s3.ListObjectsV2Input{ Bucket: aws.String(bslBucket), Prefix: aws.String(fullPrefix), + } + // list all keys + var objectIds []s3types.ObjectIdentifier + p := s3.NewListObjectsV2Paginator(s3Client, listInput) + for p.HasMorePages() { + page, err := p.NextPage(context.Background()) + if err != nil { + return errors.Wrapf(err, "failed to list objects in bucket %s", bslBucket) + } + for _, obj := range page.Contents { + objectIds = append(objectIds, s3types.ObjectIdentifier{Key: aws.String(*obj.Key)}) + } + } + _, err = s3Client.DeleteObjects(context.Background(), &s3.DeleteObjectsInput{ + Bucket: aws.String(bslBucket), + Delete: &s3types.Delete{Objects: objectIds}, }) - - if err := s3manager.NewBatchDeleteWithClient(svc).Delete(aws.BackgroundContext(), iter); err != nil { - return errors.Wrapf(err, "Fail to delete object") + if err != nil { + return errors.Wrapf(err, "failed to delete objects from bucket %s", bslBucket) } fmt.Printf("Deleted object(s) from bucket: %s %s \n", bslBucket, fullPrefix) return nil } func (s AWSStorage) IsSnapshotExisted(cloudCredentialsFile, bslConfig, backupObject string, snapshotCheck test.SnapshotCheckPoint) error { - config := flag.NewMap() config.Set(bslConfig) region := config.Data()["region"] @@ -339,26 
+310,22 @@ func (s AWSStorage) IsSnapshotExisted(cloudCredentialsFile, bslConfig, backupObj if region == "minio" { return errors.New("No snapshot for Minio provider") } - sess, err := s.CreateSession("", cloudCredentialsFile, "false", "", "", "", bslConfig) - + cfg, err := newAWSConfig(region, "", cloudCredentialsFile, false, "") if err != nil { - fmt.Printf("Fail to create session with profile %s and config %s", cloudCredentialsFile, bslConfig) - return errors.Wrapf(err, "Fail to create session with profile %s and config %s", cloudCredentialsFile, bslConfig) + return errors.Wrapf(err, "Failed to create AWS config of region %s", region) } - svc := ec2.New(sess) - params := &ec2.DescribeSnapshotsInput{ - OwnerIds: []*string{aws.String("self")}, - Filters: []*ec2.Filter{ + ec2Client := ec2.NewFromConfig(cfg) + input := &ec2.DescribeSnapshotsInput{ + OwnerIds: []string{"self"}, + Filters: []ec2types.Filter{ { - Name: aws.String("tag:velero.io/backup"), - Values: []*string{ - aws.String(backupObject), - }, + Name: aws.String("tag:velero.io/backup"), + Values: []string{backupObject}, }, }, } - result, err := svc.DescribeSnapshots(params) + result, err := ec2Client.DescribeSnapshots(context.Background(), input) if err != nil { fmt.Println(err) } @@ -386,52 +353,46 @@ func (s AWSStorage) GetMinioBucketSize(cloudCredentialsFile, bslBucket, bslPrefi config := flag.NewMap() config.Set(bslConfig) region := config.Data()["region"] - s3url := config.Data()["s3Url"] - s3Config := &aws.Config{ - Credentials: credentials.NewSharedCredentials(cloudCredentialsFile, ""), - Endpoint: aws.String(s3url), - Region: aws.String(region), - DisableSSL: aws.Bool(true), - S3ForcePathStyle: aws.Bool(true), - } if region != "minio" { return 0, errors.New("it only supported by minio") } - sess, err := session.NewSession(s3Config) + s3url := config.Data()["s3Url"] + s3Config, err := newAWSConfig(region, "", cloudCredentialsFile, true, "") if err != nil { - return 0, errors.Wrapf(err, "Error create config session") + return 0, errors.Wrapf(err, "failed to create AWS config of region %s", region) } + s3Client, err := newS3Client(s3Config, s3url, true) + if err != nil { + return 0, errors.Wrapf(err, "failed to create S3 client of region %s", region) + } + /* + s3Config := &aws.Config{ + Credentials: credentials.NewSharedCredentials(cloudCredentialsFile, ""), + Endpoint: aws.String(s3url), + Region: aws.String(region), + DisableSSL: aws.Bool(true), + S3ForcePathStyle: aws.Bool(true), + } + */ - svc := s3.New(sess) var totalSize int64 - var continuationToken *string // Paginate through objects in the bucket objectsInput := &s3.ListObjectsV2Input{ - Bucket: aws.String(bslBucket), - ContinuationToken: continuationToken, + Bucket: aws.String(bslBucket), } if bslPrefix != "" { objectsInput.Prefix = aws.String(bslPrefix) } - for { - resp, err := svc.ListObjectsV2(objectsInput) + p := s3.NewListObjectsV2Paginator(s3Client, objectsInput) + for p.HasMorePages() { + page, err := p.NextPage(context.Background()) if err != nil { - return 0, errors.Wrapf(err, "Error list objects") + return 0, errors.Wrapf(err, "failed to list objects in bucket %s", bslBucket) } - - // Process objects in the current response - for _, obj := range resp.Contents { - totalSize += *obj.Size + for _, obj := range page.Contents { + totalSize += obj.Size } - - // Check if there are more objects to retrieve - if !*resp.IsTruncated { - break - } - - // Set the continuation token for the next page - continuationToken = resp.NextContinuationToken } return totalSize, 
nil } diff --git a/test/util/velero/velero_utils.go b/test/util/velero/velero_utils.go index f256aa6f1..a106cf5b5 100644 --- a/test/util/velero/velero_utils.go +++ b/test/util/velero/velero_utils.go @@ -35,11 +35,9 @@ import ( "github.com/pkg/errors" "golang.org/x/exp/slices" - "k8s.io/apimachinery/pkg/util/wait" - - kbclient "sigs.k8s.io/controller-runtime/pkg/client" - ver "k8s.io/apimachinery/pkg/util/version" + "k8s.io/apimachinery/pkg/util/wait" + kbclient "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" cliinstall "github.com/vmware-tanzu/velero/pkg/cmd/cli/install" @@ -1161,7 +1159,7 @@ func SnapshotCRsCountShouldBe(ctx context.Context, namespace, backupName string, } func BackupRepositoriesCountShouldBe(ctx context.Context, veleroNamespace, targetNamespace string, expectedCount int) error { - resticArr, err := GetResticRepositories(ctx, veleroNamespace, targetNamespace) + resticArr, err := GetRepositories(ctx, veleroNamespace, targetNamespace) if err != nil { return errors.Wrapf(err, "Fail to get BackupRepositories") } @@ -1172,11 +1170,11 @@ func BackupRepositoriesCountShouldBe(ctx context.Context, veleroNamespace, targe } } -func GetResticRepositories(ctx context.Context, veleroNamespace, targetNamespace string) ([]string, error) { +func GetRepositories(ctx context.Context, veleroNamespace, targetNamespace string) ([]string, error) { cmds := []*common.OsCommandLine{} cmd := &common.OsCommandLine{ Cmd: "kubectl", - Args: []string{"get", "-n", veleroNamespace, "BackupRepositories"}, + Args: []string{"get", "-n", veleroNamespace, "backuprepositories.velero.io"}, } cmds = append(cmds, cmd)