diff --git a/.github/auto-assignees.yml b/.github/auto-assignees.yml index 282591d72..8f5a75a57 100644 --- a/.github/auto-assignees.yml +++ b/.github/auto-assignees.yml @@ -16,6 +16,7 @@ reviewers: - blackpiglet - qiuming-best - shubham-pampattiwar + - Lyndon-Li tech-writer: - a-mccarthy diff --git a/.github/workflows/pr-changelog-check.yml b/.github/workflows/pr-changelog-check.yml index 647c9d89a..308e07d2d 100644 --- a/.github/workflows/pr-changelog-check.yml +++ b/.github/workflows/pr-changelog-check.yml @@ -1,5 +1,9 @@ name: Pull Request Changelog Check -on: [pull_request] +# by setting `on: [pull_request]`, that means action will be trigger when PR is opened, synchronize, reopened. +# Add labeled and unlabeled events too. +on: + pull_request: + types: [opened, synchronize, reopened, labeled, unlabeled] jobs: build: diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 2819a77e8..f62eb0f94 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -13,6 +13,7 @@ | Xun Jiang | [blackpiglet](https://github.com/blackpiglet) | [VMware](https://www.github.com/vmware/) | | Ming Qiu | [qiuming-best](https://github.com/qiuming-best) | [VMware](https://www.github.com/vmware/) | | Shubham Pampattiwar | [shubham-pampattiwar](https://github.com/shubham-pampattiwar) | [OpenShift](https://github.com/openshift) +| Yonghui Li | [Lyndon-Li](https://github.com/Lyndon-Li) | [VMware](https://www.github.com/vmware/) | ## Emeritus Maintainers * Adnan Abdulhussein ([prydonius](https://github.com/prydonius)) diff --git a/Makefile b/Makefile index 8dcf6ee50..b3a6a32f2 100644 --- a/Makefile +++ b/Makefile @@ -163,6 +163,7 @@ shell: build-dirs build-env @# under $GOPATH). 
@docker run \ -e GOFLAGS \ + -e GOPROXY \ -i $(TTY) \ --rm \ -u $$(id -u):$$(id -g) \ diff --git a/Tiltfile b/Tiltfile index 02da1df56..0d9a64263 100644 --- a/Tiltfile +++ b/Tiltfile @@ -7,7 +7,7 @@ k8s_yaml([ 'config/crd/v1/bases/velero.io_downloadrequests.yaml', 'config/crd/v1/bases/velero.io_podvolumebackups.yaml', 'config/crd/v1/bases/velero.io_podvolumerestores.yaml', - 'config/crd/v1/bases/velero.io_resticrepositories.yaml', + 'config/crd/v1/bases/velero.io_backuprepositories.yaml', 'config/crd/v1/bases/velero.io_restores.yaml', 'config/crd/v1/bases/velero.io_schedules.yaml', 'config/crd/v1/bases/velero.io_serverstatusrequests.yaml', diff --git a/changelogs/unreleased/4926-lyndon b/changelogs/unreleased/4926-lyndon new file mode 100644 index 000000000..d5c23db47 --- /dev/null +++ b/changelogs/unreleased/4926-lyndon @@ -0,0 +1 @@ +Unified Repository Design \ No newline at end of file diff --git a/changelogs/unreleased/5051-niulechuan b/changelogs/unreleased/5051-niulechuan new file mode 100644 index 000000000..1cba2e248 --- /dev/null +++ b/changelogs/unreleased/5051-niulechuan @@ -0,0 +1 @@ +Fix typo in doc, in https://velero.io/docs/main/restore-reference/ "Restore order" section, "Mamespace" should be "Namespace". 
diff --git a/changelogs/unreleased/5053-niulechuan b/changelogs/unreleased/5053-niulechuan new file mode 100644 index 000000000..f44c46a69 --- /dev/null +++ b/changelogs/unreleased/5053-niulechuan @@ -0,0 +1 @@ +Move 'velero.io/exclude-from-backup' label string to const diff --git a/changelogs/unreleased/5101-ywk253100 b/changelogs/unreleased/5101-ywk253100 new file mode 100644 index 000000000..ade00f2a9 --- /dev/null +++ b/changelogs/unreleased/5101-ywk253100 @@ -0,0 +1 @@ + Fix bsl validation bug: the BSL is validated continually and doesn't respect the validation period configured \ No newline at end of file diff --git a/changelogs/unreleased/5110-reasonerjt b/changelogs/unreleased/5110-reasonerjt new file mode 100644 index 000000000..350f91fa1 --- /dev/null +++ b/changelogs/unreleased/5110-reasonerjt @@ -0,0 +1 @@ +Dump stack trace when the plugin server handles panic \ No newline at end of file diff --git a/changelogs/unreleased/5122-sseago b/changelogs/unreleased/5122-sseago new file mode 100644 index 000000000..ec8dc473e --- /dev/null +++ b/changelogs/unreleased/5122-sseago @@ -0,0 +1 @@ +Modify BackupStoreGetter to avoid BSL spec changes diff --git a/changelogs/unreleased/5128-reasonerjt b/changelogs/unreleased/5128-reasonerjt new file mode 100644 index 000000000..3ba53b059 --- /dev/null +++ b/changelogs/unreleased/5128-reasonerjt @@ -0,0 +1 @@ +Let "make shell xxx" respect GOPROXY \ No newline at end of file diff --git a/changelogs/unreleased/5135-reasonerjt b/changelogs/unreleased/5135-reasonerjt new file mode 100644 index 000000000..0505ab046 --- /dev/null +++ b/changelogs/unreleased/5135-reasonerjt @@ -0,0 +1 @@ +Update the CRD for kopia integration \ No newline at end of file diff --git a/changelogs/unreleased/5142-lyndon b/changelogs/unreleased/5142-lyndon new file mode 100644 index 000000000..10286cf0b --- /dev/null +++ b/changelogs/unreleased/5142-lyndon @@ -0,0 +1,4 @@ +Kopia Integration: Add the Unified Repository Interface definition. 
+Kopia Integration: Add the changes for Unified Repository storage config. + +Related Issues; #5076, #5080 \ No newline at end of file diff --git a/changelogs/unreleased/5143-ywk253100 b/changelogs/unreleased/5143-ywk253100 new file mode 100644 index 000000000..cf5213645 --- /dev/null +++ b/changelogs/unreleased/5143-ywk253100 @@ -0,0 +1 @@ +This commit splits the pkg/restic package into several packages to support Kopia integration works \ No newline at end of file diff --git a/changelogs/unreleased/5145-jxun b/changelogs/unreleased/5145-jxun new file mode 100644 index 000000000..959ecf73d --- /dev/null +++ b/changelogs/unreleased/5145-jxun @@ -0,0 +1 @@ +Delay CA file deletion in PVB controller. \ No newline at end of file diff --git a/changelogs/unreleased/5148-jxun b/changelogs/unreleased/5148-jxun new file mode 100644 index 000000000..e0e489b8d --- /dev/null +++ b/changelogs/unreleased/5148-jxun @@ -0,0 +1 @@ +VolumeSnapshotLocation refactor with kubebuilder. \ No newline at end of file diff --git a/changelogs/unreleased/5157-jxun b/changelogs/unreleased/5157-jxun new file mode 100644 index 000000000..e42a0c93c --- /dev/null +++ b/changelogs/unreleased/5157-jxun @@ -0,0 +1 @@ +Add labeled and unlabeled events for PR changelog check action. 
\ No newline at end of file diff --git a/changelogs/unreleased/5165-reasonerjt b/changelogs/unreleased/5165-reasonerjt new file mode 100644 index 000000000..c33f179b3 --- /dev/null +++ b/changelogs/unreleased/5165-reasonerjt @@ -0,0 +1 @@ +Skip registering "crd-remap-version" plugin when feature flag "EnableAPIGroupVersions" is set \ No newline at end of file diff --git a/changelogs/unreleased/5167-lyndon b/changelogs/unreleased/5167-lyndon new file mode 100644 index 000000000..7bef82dcc --- /dev/null +++ b/changelogs/unreleased/5167-lyndon @@ -0,0 +1 @@ +Add changes for Kopia Integration: Unified Repository Provider - Repo Password \ No newline at end of file diff --git a/changelogs/unreleased/5172-qiuming-best b/changelogs/unreleased/5172-qiuming-best new file mode 100644 index 000000000..fa57d3d30 --- /dev/null +++ b/changelogs/unreleased/5172-qiuming-best @@ -0,0 +1 @@ +Fix restic backups to multiple backup storage locations bug diff --git a/changelogs/unreleased/5174-jxun b/changelogs/unreleased/5174-jxun new file mode 100644 index 000000000..4c3991b28 --- /dev/null +++ b/changelogs/unreleased/5174-jxun @@ -0,0 +1 @@ +Reduce CRD size. \ No newline at end of file diff --git a/changelogs/unreleased/5178-allenxu404 b/changelogs/unreleased/5178-allenxu404 new file mode 100644 index 000000000..2c1b4b0e0 --- /dev/null +++ b/changelogs/unreleased/5178-allenxu404 @@ -0,0 +1,2 @@ +Treat namespaces with exclude label as excludedNamespaces +Related issue: #2413 diff --git a/changelogs/unreleased/5181-jxun b/changelogs/unreleased/5181-jxun new file mode 100644 index 000000000..4333691d2 --- /dev/null +++ b/changelogs/unreleased/5181-jxun @@ -0,0 +1 @@ +Add annotation "pv.kubernetes.io/migrated-to" for CSI checking. 
\ No newline at end of file diff --git a/config/crd/v1/bases/velero.io_resticrepositories.yaml b/config/crd/v1/bases/velero.io_backuprepositories.yaml similarity index 78% rename from config/crd/v1/bases/velero.io_resticrepositories.yaml rename to config/crd/v1/bases/velero.io_backuprepositories.yaml index 251766136..fa7e5596e 100644 --- a/config/crd/v1/bases/velero.io_resticrepositories.yaml +++ b/config/crd/v1/bases/velero.io_backuprepositories.yaml @@ -6,20 +6,23 @@ metadata: annotations: controller-gen.kubebuilder.io/version: v0.7.0 creationTimestamp: null - name: resticrepositories.velero.io + name: backuprepositories.velero.io spec: group: velero.io names: - kind: ResticRepository - listKind: ResticRepositoryList - plural: resticrepositories - singular: resticrepository + kind: BackupRepository + listKind: BackupRepositoryList + plural: backuprepositories + singular: backuprepository scope: Namespaced versions: - additionalPrinterColumns: - jsonPath: .metadata.creationTimestamp name: Age type: date + - jsonPath: .spec.repositoryType + name: Repository Type + type: string name: v1 schema: openAPIV3Schema: @@ -37,7 +40,7 @@ spec: metadata: type: object spec: - description: ResticRepositorySpec is the specification for a ResticRepository. + description: BackupRepositorySpec is the specification for a BackupRepository. properties: backupStorageLocation: description: BackupStorageLocation is the name of the BackupStorageLocation @@ -47,12 +50,19 @@ spec: description: MaintenanceFrequency is how often maintenance should be run. type: string + repositoryType: + description: RepositoryType indicates the type of the backend repository + enum: + - kopia + - restic + - "" + type: string resticIdentifier: description: ResticIdentifier is the full restic-compatible string for identifying this repository. 
type: string volumeNamespace: - description: VolumeNamespace is the namespace this restic repository + description: VolumeNamespace is the namespace this backup repository contains pod volume backups for. type: string required: @@ -62,7 +72,7 @@ spec: - volumeNamespace type: object status: - description: ResticRepositoryStatus is the current status of a ResticRepository. + description: BackupRepositoryStatus is the current status of a BackupRepository. properties: lastMaintenanceTime: description: LastMaintenanceTime is the last time maintenance was @@ -72,10 +82,10 @@ spec: type: string message: description: Message is a message about the current status of the - ResticRepository. + BackupRepository. type: string phase: - description: Phase is the current state of the ResticRepository. + description: Phase is the current state of the BackupRepository. enum: - New - Ready diff --git a/config/crd/v1/bases/velero.io_podvolumebackups.yaml b/config/crd/v1/bases/velero.io_podvolumebackups.yaml index 69993f0d4..6af4ce294 100644 --- a/config/crd/v1/bases/velero.io_podvolumebackups.yaml +++ b/config/crd/v1/bases/velero.io_podvolumebackups.yaml @@ -37,9 +37,13 @@ spec: jsonPath: .spec.volume name: Volume type: string - - description: Restic repository identifier for this backup + - description: Backup repository identifier for this backup jsonPath: .spec.repoIdentifier - name: Restic Repo + name: Repository ID + type: string + - description: The type of the uploader to handle data transfer + jsonPath: .spec.uploaderType + name: Uploader Type type: string - description: Name of the Backup Storage Location where this backup should be stored @@ -70,7 +74,7 @@ spec: properties: backupStorageLocation: description: BackupStorageLocation is the name of the backup storage - location where the restic repository is stored. + location where the backup repository is stored. 
type: string node: description: Node is the name of the node that the Pod is running @@ -114,7 +118,7 @@ spec: type: string type: object repoIdentifier: - description: RepoIdentifier is the restic repository identifier. + description: RepoIdentifier is the backup repository identifier. type: string tags: additionalProperties: @@ -122,6 +126,14 @@ spec: description: Tags are a map of key-value pairs that should be applied to the volume backup as tags. type: object + uploaderType: + description: UploaderType is the type of the uploader to handle the + data transfer. + enum: + - kopia + - restic + - "" + type: string volume: description: Volume is the name of the volume within the Pod to be backed up. diff --git a/config/crd/v1/bases/velero.io_podvolumerestores.yaml b/config/crd/v1/bases/velero.io_podvolumerestores.yaml index 6f77cb67c..036f58a06 100644 --- a/config/crd/v1/bases/velero.io_podvolumerestores.yaml +++ b/config/crd/v1/bases/velero.io_podvolumerestores.yaml @@ -25,6 +25,10 @@ spec: jsonPath: .spec.pod.name name: Pod type: string + - description: The type of the uploader to handle data transfer + jsonPath: .spec.uploaderType + name: Uploader Type + type: string - description: Name of the volume to be restored jsonPath: .spec.volume name: Volume @@ -67,7 +71,7 @@ spec: properties: backupStorageLocation: description: BackupStorageLocation is the name of the backup storage - location where the restic repository is stored. + location where the backup repository is stored. type: string pod: description: Pod is a reference to the pod containing the volume to @@ -107,11 +111,19 @@ spec: type: string type: object repoIdentifier: - description: RepoIdentifier is the restic repository identifier. + description: RepoIdentifier is the backup repository identifier. type: string snapshotID: description: SnapshotID is the ID of the volume snapshot to be restored. 
type: string + uploaderType: + description: UploaderType is the type of the uploader to handle the + data transfer. + enum: + - kopia + - restic + - "" + type: string volume: description: Volume is the name of the volume within the Pod to be restored. diff --git a/config/crd/v1/bases/velero.io_restores.yaml b/config/crd/v1/bases/velero.io_restores.yaml index 4bc98b3a4..181a4e5fa 100644 --- a/config/crd/v1/bases/velero.io_restores.yaml +++ b/config/crd/v1/bases/velero.io_restores.yaml @@ -1,3 +1,5 @@ + +--- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -200,1426 +202,9 @@ spec: description: InitContainers is list of init containers to be added to a pod during its restore. items: - description: A single application container - that you want to run within a pod. - properties: - args: - description: 'Arguments to the entrypoint. - The docker image''s CMD is used if this - is not provided. Variable references $(VAR_NAME) - are expanded using the container''s environment. - If a variable cannot be resolved, the - reference in the input string will be - unchanged. Double $$ are reduced to a - single $, which allows for escaping the - $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" - will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, - regardless of whether the variable exists - or not. Cannot be updated. More info: - https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: - type: string - type: array - command: - description: 'Entrypoint array. Not executed - within a shell. The docker image''s ENTRYPOINT - is used if this is not provided. Variable - references $(VAR_NAME) are expanded using - the container''s environment. If a variable - cannot be resolved, the reference in the - input string will be unchanged. Double - $$ are reduced to a single $, which allows - for escaping the $(VAR_NAME) syntax: i.e. 
- "$$(VAR_NAME)" will produce the string - literal "$(VAR_NAME)". Escaped references - will never be expanded, regardless of - whether the variable exists or not. Cannot - be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' - items: - type: string - type: array - env: - description: List of environment variables - to set in the container. Cannot be updated. - items: - description: EnvVar represents an environment - variable present in a Container. - properties: - name: - description: Name of the environment - variable. Must be a C_IDENTIFIER. - type: string - value: - description: 'Variable references - $(VAR_NAME) are expanded using the - previously defined environment variables - in the container and any service - environment variables. If a variable - cannot be resolved, the reference - in the input string will be unchanged. - Double $$ are reduced to a single - $, which allows for escaping the - $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" - will produce the string literal - "$(VAR_NAME)". Escaped references - will never be expanded, regardless - of whether the variable exists or - not. Defaults to "".' - type: string - valueFrom: - description: Source for the environment - variable's value. Cannot be used - if value is not empty. - properties: - configMapKeyRef: - description: Selects a key of - a ConfigMap. - properties: - key: - description: The key to select. - type: string - name: - description: 'Name of the - referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. - apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether - the ConfigMap or its key - must be defined - type: boolean - required: - - key - type: object - fieldRef: - description: 'Selects a field - of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, - `metadata.annotations['''']`, - spec.nodeName, spec.serviceAccountName, - status.hostIP, status.podIP, - status.podIPs.' - properties: - apiVersion: - description: Version of the - schema the FieldPath is - written in terms of, defaults - to "v1". - type: string - fieldPath: - description: Path of the field - to select in the specified - API version. - type: string - required: - - fieldPath - type: object - resourceFieldRef: - description: 'Selects a resource - of the container: only resources - limits and requests (limits.cpu, - limits.memory, limits.ephemeral-storage, - requests.cpu, requests.memory - and requests.ephemeral-storage) - are currently supported.' - properties: - containerName: - description: 'Container name: - required for volumes, optional - for env vars' - type: string - divisor: - anyOf: - - type: integer - - type: string - description: Specifies the - output format of the exposed - resources, defaults to "1" - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - resource: - description: 'Required: resource - to select' - type: string - required: - - resource - type: object - secretKeyRef: - description: Selects a key of - a secret in the pod's namespace - properties: - key: - description: The key of the - secret to select from. Must - be a valid secret key. - type: string - name: - description: 'Name of the - referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. - apiVersion, kind, uid?' 
- type: string - optional: - description: Specify whether - the Secret or its key must - be defined - type: boolean - required: - - key - type: object - type: object - required: - - name - type: object - type: array - envFrom: - description: List of sources to populate - environment variables in the container. - The keys defined within a source must - be a C_IDENTIFIER. All invalid keys will - be reported as an event when the container - is starting. When a key exists in multiple - sources, the value associated with the - last source will take precedence. Values - defined by an Env with a duplicate key - will take precedence. Cannot be updated. - items: - description: EnvFromSource represents - the source of a set of ConfigMaps - properties: - configMapRef: - description: The ConfigMap to select - from - properties: - name: - description: 'Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. - apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the - ConfigMap must be defined - type: boolean - type: object - prefix: - description: An optional identifier - to prepend to each key in the ConfigMap. - Must be a C_IDENTIFIER. - type: string - secretRef: - description: The Secret to select - from - properties: - name: - description: 'Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. - apiVersion, kind, uid?' - type: string - optional: - description: Specify whether the - Secret must be defined - type: boolean - type: object - type: object - type: array - image: - description: 'Docker image name. More info: - https://kubernetes.io/docs/concepts/containers/images - This field is optional to allow higher - level config management to default or - override container images in workload - controllers like Deployments and StatefulSets.' 
- type: string - imagePullPolicy: - description: 'Image pull policy. One of - Always, Never, IfNotPresent. Defaults - to Always if :latest tag is specified, - or IfNotPresent otherwise. Cannot be updated. - More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' - type: string - lifecycle: - description: Actions that the management - system should take in response to container - lifecycle events. Cannot be updated. - properties: - postStart: - description: 'PostStart is called immediately - after a container is created. If the - handler fails, the container is terminated - and restarted according to its restart - policy. Other management of the container - blocks until the hook completes. More - info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of - the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the - command line to execute inside - the container, the working - directory for the command is - root ('/') in the container's - filesystem. The command is - simply exec'd, it is not run - inside a shell, so traditional - shell instructions ('|', etc) - won't work. To use a shell, - you need to explicitly call - out to that shell. Exit status - of 0 is treated as live/healthy - and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the - http request to perform. - properties: - host: - description: Host name to connect - to, defaults to the pod IP. - You probably want to set "Host" - in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers - to set in the request. HTTP - allows repeated headers. 
- items: - description: HTTPHeader describes - a custom header to be used - in HTTP probes - properties: - name: - description: The header - field name - type: string - value: - description: The header - field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access - on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number - of the port to access on the - container. Number must be - in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for - connecting to the host. Defaults - to HTTP. - type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies - an action involving a TCP port. - TCP hooks not yet supported TODO: - implement a realistic TCP lifecycle - hook' - properties: - host: - description: 'Optional: Host - name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name - of the port to access on the - container. Number must be - in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - preStop: - description: 'PreStop is called immediately - before a container is terminated due - to an API request or management event - such as liveness/startup probe failure, - preemption, resource contention, etc. - The handler is not called if the container - crashes or exits. The reason for termination - is passed to the handler. The Pod''s - termination grace period countdown - begins before the PreStop hooked is - executed. Regardless of the outcome - of the handler, the container will - eventually terminate within the Pod''s - termination grace period. 
Other management - of the container blocks until the - hook completes or until the termination - grace period is reached. More info: - https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' - properties: - exec: - description: One and only one of - the following should be specified. - Exec specifies the action to take. - properties: - command: - description: Command is the - command line to execute inside - the container, the working - directory for the command is - root ('/') in the container's - filesystem. The command is - simply exec'd, it is not run - inside a shell, so traditional - shell instructions ('|', etc) - won't work. To use a shell, - you need to explicitly call - out to that shell. Exit status - of 0 is treated as live/healthy - and non-zero is unhealthy. - items: - type: string - type: array - type: object - httpGet: - description: HTTPGet specifies the - http request to perform. - properties: - host: - description: Host name to connect - to, defaults to the pod IP. - You probably want to set "Host" - in httpHeaders instead. - type: string - httpHeaders: - description: Custom headers - to set in the request. HTTP - allows repeated headers. - items: - description: HTTPHeader describes - a custom header to be used - in HTTP probes - properties: - name: - description: The header - field name - type: string - value: - description: The header - field value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access - on the HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number - of the port to access on the - container. Number must be - in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for - connecting to the host. Defaults - to HTTP. 
- type: string - required: - - port - type: object - tcpSocket: - description: 'TCPSocket specifies - an action involving a TCP port. - TCP hooks not yet supported TODO: - implement a realistic TCP lifecycle - hook' - properties: - host: - description: 'Optional: Host - name to connect to, defaults - to the pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name - of the port to access on the - container. Number must be - in the range 1 to 65535. Name - must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - type: object - type: object - livenessProbe: - description: 'Periodic probe of container - liveness. Container will be restarted - if the probe fails. Cannot be updated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the - following should be specified. Exec - specifies the action to take. - properties: - command: - description: Command is the command - line to execute inside the container, - the working directory for the - command is root ('/') in the - container's filesystem. The command - is simply exec'd, it is not run - inside a shell, so traditional - shell instructions ('|', etc) - won't work. To use a shell, you - need to explicitly call out to - that shell. Exit status of 0 is - treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures - for the probe to be considered failed - after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http - request to perform. - properties: - host: - description: Host name to connect - to, defaults to the pod IP. You - probably want to set "Host" in - httpHeaders instead. 
- type: string - httpHeaders: - description: Custom headers to set - in the request. HTTP allows repeated - headers. - items: - description: HTTPHeader describes - a custom header to be used in - HTTP probes - properties: - name: - description: The header field - name - type: string - value: - description: The header field - value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the - HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the - port to access on the container. - Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after - the container has started before liveness - probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) - to perform the probe. Default to 10 - seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes - for the probe to be considered successful - after having failed. Defaults to 1. - Must be 1 for liveness and startup. - Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an - action involving a TCP port. TCP hooks - not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the - pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the - port to access on the container. - Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. 
- x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: Optional duration in seconds - the pod needs to terminate gracefully - upon probe failure. The grace period - is the duration in seconds after the - processes running in the pod are sent - a termination signal and the time - when the processes are forcibly halted - with a kill signal. Set this value - longer than the expected cleanup time - for your process. If this value is - nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value - overrides the value provided by the - pod spec. Value must be non-negative - integer. The value zero indicates - stop immediately via the kill signal - (no opportunity to shut down). This - is a beta field and requires enabling - ProbeTerminationGracePeriod feature - gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after - which the probe times out. Defaults - to 1 second. Minimum value is 1. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - name: - description: Name of the container specified - as a DNS_LABEL. Each container in a pod - must have a unique name (DNS_LABEL). Cannot - be updated. - type: string - ports: - description: List of ports to expose from - the container. Exposing a port here gives - the system additional information about - the network connections a container uses, - but is primarily informational. Not specifying - a port here DOES NOT prevent that port - from being exposed. Any port which is - listening on the default "0.0.0.0" address - inside a container will be accessible - from the network. Cannot be updated. - items: - description: ContainerPort represents - a network port in a single container. 
- properties: - containerPort: - description: Number of port to expose - on the pod's IP address. This must - be a valid port number, 0 < x < - 65536. - format: int32 - type: integer - hostIP: - description: What host IP to bind - the external port to. - type: string - hostPort: - description: Number of port to expose - on the host. If specified, this - must be a valid port number, 0 < - x < 65536. If HostNetwork is specified, - this must match ContainerPort. Most - containers do not need this. - format: int32 - type: integer - name: - description: If specified, this must - be an IANA_SVC_NAME and unique within - the pod. Each named port in a pod - must have a unique name. Name for - the port that can be referred to - by services. - type: string - protocol: - default: TCP - description: Protocol for port. Must - be UDP, TCP, or SCTP. Defaults to - "TCP". - type: string - required: - - containerPort - - protocol - type: object - type: array - x-kubernetes-list-map-keys: - - containerPort - - protocol - x-kubernetes-list-type: map - readinessProbe: - description: 'Periodic probe of container - service readiness. Container will be removed - from service endpoints if the probe fails. - Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the - following should be specified. Exec - specifies the action to take. - properties: - command: - description: Command is the command - line to execute inside the container, - the working directory for the - command is root ('/') in the - container's filesystem. The command - is simply exec'd, it is not run - inside a shell, so traditional - shell instructions ('|', etc) - won't work. To use a shell, you - need to explicitly call out to - that shell. Exit status of 0 is - treated as live/healthy and non-zero - is unhealthy. 
- items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures - for the probe to be considered failed - after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http - request to perform. - properties: - host: - description: Host name to connect - to, defaults to the pod IP. You - probably want to set "Host" in - httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set - in the request. HTTP allows repeated - headers. - items: - description: HTTPHeader describes - a custom header to be used in - HTTP probes - properties: - name: - description: The header field - name - type: string - value: - description: The header field - value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the - HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the - port to access on the container. - Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. - type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after - the container has started before liveness - probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) - to perform the probe. Default to 10 - seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes - for the probe to be considered successful - after having failed. Defaults to 1. - Must be 1 for liveness and startup. - Minimum value is 1. 
- format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an - action involving a TCP port. TCP hooks - not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the - pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the - port to access on the container. - Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: Optional duration in seconds - the pod needs to terminate gracefully - upon probe failure. The grace period - is the duration in seconds after the - processes running in the pod are sent - a termination signal and the time - when the processes are forcibly halted - with a kill signal. Set this value - longer than the expected cleanup time - for your process. If this value is - nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value - overrides the value provided by the - pod spec. Value must be non-negative - integer. The value zero indicates - stop immediately via the kill signal - (no opportunity to shut down). This - is a beta field and requires enabling - ProbeTerminationGracePeriod feature - gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after - which the probe times out. Defaults - to 1 second. Minimum value is 1. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - resources: - description: 'Compute Resources required - by this container. Cannot be updated. 
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum - amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the - minimum amount of compute resources - required. If Requests is omitted for - a container, it defaults to Limits - if that is explicitly specified, otherwise - to an implementation-defined value. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' - type: object - type: object - securityContext: - description: 'SecurityContext defines the - security options the container should - be run with. If set, the fields of SecurityContext - override the equivalent fields of PodSecurityContext. - More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' - properties: - allowPrivilegeEscalation: - description: 'AllowPrivilegeEscalation - controls whether a process can gain - more privileges than its parent process. - This bool directly controls if the - no_new_privs flag will be set on the - container process. AllowPrivilegeEscalation - is true always when the container - is: 1) run as Privileged 2) has CAP_SYS_ADMIN' - type: boolean - capabilities: - description: The capabilities to add/drop - when running containers. Defaults - to the default set of capabilities - granted by the container runtime. 
- properties: - add: - description: Added capabilities - items: - description: Capability represent - POSIX capabilities type - type: string - type: array - drop: - description: Removed capabilities - items: - description: Capability represent - POSIX capabilities type - type: string - type: array - type: object - privileged: - description: Run container in privileged - mode. Processes in privileged containers - are essentially equivalent to root - on the host. Defaults to false. - type: boolean - procMount: - description: procMount denotes the type - of proc mount to use for the containers. - The default is DefaultProcMount which - uses the container runtime defaults - for readonly paths and masked paths. - This requires the ProcMountType feature - flag to be enabled. - type: string - readOnlyRootFilesystem: - description: Whether this container - has a read-only root filesystem. Default - is false. - type: boolean - runAsGroup: - description: The GID to run the entrypoint - of the container process. Uses runtime - default if unset. May also be set - in PodSecurityContext. If set in - both SecurityContext and PodSecurityContext, - the value specified in SecurityContext - takes precedence. - format: int64 - type: integer - runAsNonRoot: - description: Indicates that the container - must run as a non-root user. If true, - the Kubelet will validate the image - at runtime to ensure that it does - not run as UID 0 (root) and fail to - start the container if it does. If - unset or false, no such validation - will be performed. May also be set - in PodSecurityContext. If set in - both SecurityContext and PodSecurityContext, - the value specified in SecurityContext - takes precedence. - type: boolean - runAsUser: - description: The UID to run the entrypoint - of the container process. Defaults - to user specified in image metadata - if unspecified. May also be set in - PodSecurityContext. 
If set in both - SecurityContext and PodSecurityContext, - the value specified in SecurityContext - takes precedence. - format: int64 - type: integer - seLinuxOptions: - description: The SELinux context to - be applied to the container. If unspecified, - the container runtime will allocate - a random SELinux context for each - container. May also be set in PodSecurityContext. If - set in both SecurityContext and PodSecurityContext, - the value specified in SecurityContext - takes precedence. - properties: - level: - description: Level is SELinux level - label that applies to the container. - type: string - role: - description: Role is a SELinux role - label that applies to the container. - type: string - type: - description: Type is a SELinux type - label that applies to the container. - type: string - user: - description: User is a SELinux user - label that applies to the container. - type: string - type: object - seccompProfile: - description: The seccomp options to - use by this container. If seccomp - options are provided at both the pod - & container level, the container options - override the pod options. - properties: - localhostProfile: - description: localhostProfile indicates - a profile defined in a file on - the node should be used. The profile - must be preconfigured on the node - to work. Must be a descending - path, relative to the kubelet's - configured seccomp profile location. - Must only be set if type is "Localhost". - type: string - type: - description: "type indicates which - kind of seccomp profile will be - applied. Valid options are: \n - Localhost - a profile defined - in a file on the node should be - used. RuntimeDefault - the container - runtime default profile should - be used. Unconfined - no profile - should be applied." - type: string - required: - - type - type: object - windowsOptions: - description: The Windows specific settings - applied to all containers. If unspecified, - the options from the PodSecurityContext - will be used. 
If set in both SecurityContext - and PodSecurityContext, the value - specified in SecurityContext takes - precedence. - properties: - gmsaCredentialSpec: - description: GMSACredentialSpec - is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) - inlines the contents of the GMSA - credential spec named by the GMSACredentialSpecName - field. - type: string - gmsaCredentialSpecName: - description: GMSACredentialSpecName - is the name of the GMSA credential - spec to use. - type: string - hostProcess: - description: HostProcess determines - if a container should be run as - a 'Host Process' container. This - field is alpha-level and will - only be honored by components - that enable the WindowsHostProcessContainers - feature flag. Setting this field - without the feature flag will - result in errors when validating - the Pod. All of a Pod's containers - must have the same effective HostProcess - value (it is not allowed to have - a mix of HostProcess containers - and non-HostProcess containers). In - addition, if HostProcess is true - then HostNetwork must also be - set to true. - type: boolean - runAsUserName: - description: The UserName in Windows - to run the entrypoint of the container - process. Defaults to the user - specified in image metadata if - unspecified. May also be set in - PodSecurityContext. If set in - both SecurityContext and PodSecurityContext, - the value specified in SecurityContext - takes precedence. - type: string - type: object - type: object - startupProbe: - description: 'StartupProbe indicates that - the Pod has successfully initialized. - If specified, no other probes are executed - until this completes successfully. If - this probe fails, the Pod will be restarted, - just as if the livenessProbe failed. This - can be used to provide different probe - parameters at the beginning of a Pod''s - lifecycle, when it might take a long time - to load data or warm a cache, than during - steady-state operation. 
This cannot be - updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - properties: - exec: - description: One and only one of the - following should be specified. Exec - specifies the action to take. - properties: - command: - description: Command is the command - line to execute inside the container, - the working directory for the - command is root ('/') in the - container's filesystem. The command - is simply exec'd, it is not run - inside a shell, so traditional - shell instructions ('|', etc) - won't work. To use a shell, you - need to explicitly call out to - that shell. Exit status of 0 is - treated as live/healthy and non-zero - is unhealthy. - items: - type: string - type: array - type: object - failureThreshold: - description: Minimum consecutive failures - for the probe to be considered failed - after having succeeded. Defaults to - 3. Minimum value is 1. - format: int32 - type: integer - httpGet: - description: HTTPGet specifies the http - request to perform. - properties: - host: - description: Host name to connect - to, defaults to the pod IP. You - probably want to set "Host" in - httpHeaders instead. - type: string - httpHeaders: - description: Custom headers to set - in the request. HTTP allows repeated - headers. - items: - description: HTTPHeader describes - a custom header to be used in - HTTP probes - properties: - name: - description: The header field - name - type: string - value: - description: The header field - value - type: string - required: - - name - - value - type: object - type: array - path: - description: Path to access on the - HTTP server. - type: string - port: - anyOf: - - type: integer - - type: string - description: Name or number of the - port to access on the container. - Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - scheme: - description: Scheme to use for connecting - to the host. Defaults to HTTP. 
- type: string - required: - - port - type: object - initialDelaySeconds: - description: 'Number of seconds after - the container has started before liveness - probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - periodSeconds: - description: How often (in seconds) - to perform the probe. Default to 10 - seconds. Minimum value is 1. - format: int32 - type: integer - successThreshold: - description: Minimum consecutive successes - for the probe to be considered successful - after having failed. Defaults to 1. - Must be 1 for liveness and startup. - Minimum value is 1. - format: int32 - type: integer - tcpSocket: - description: 'TCPSocket specifies an - action involving a TCP port. TCP hooks - not yet supported TODO: implement - a realistic TCP lifecycle hook' - properties: - host: - description: 'Optional: Host name - to connect to, defaults to the - pod IP.' - type: string - port: - anyOf: - - type: integer - - type: string - description: Number or name of the - port to access on the container. - Number must be in the range 1 - to 65535. Name must be an IANA_SVC_NAME. - x-kubernetes-int-or-string: true - required: - - port - type: object - terminationGracePeriodSeconds: - description: Optional duration in seconds - the pod needs to terminate gracefully - upon probe failure. The grace period - is the duration in seconds after the - processes running in the pod are sent - a termination signal and the time - when the processes are forcibly halted - with a kill signal. Set this value - longer than the expected cleanup time - for your process. If this value is - nil, the pod's terminationGracePeriodSeconds - will be used. Otherwise, this value - overrides the value provided by the - pod spec. Value must be non-negative - integer. The value zero indicates - stop immediately via the kill signal - (no opportunity to shut down). 
This - is a beta field and requires enabling - ProbeTerminationGracePeriod feature - gate. Minimum value is 1. spec.terminationGracePeriodSeconds - is used if unset. - format: int64 - type: integer - timeoutSeconds: - description: 'Number of seconds after - which the probe times out. Defaults - to 1 second. Minimum value is 1. More - info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' - format: int32 - type: integer - type: object - stdin: - description: Whether this container should - allocate a buffer for stdin in the container - runtime. If this is not set, reads from - stdin in the container will always result - in EOF. Default is false. - type: boolean - stdinOnce: - description: Whether the container runtime - should close the stdin channel after it - has been opened by a single attach. When - stdin is true the stdin stream will remain - open across multiple attach sessions. - If stdinOnce is set to true, stdin is - opened on container start, is empty until - the first client attaches to stdin, and - then remains open and accepts data until - the client disconnects, at which time - stdin is closed and remains closed until - the container is restarted. If this flag - is false, a container processes that reads - from stdin will never receive an EOF. - Default is false - type: boolean - terminationMessagePath: - description: 'Optional: Path at which the - file to which the container''s termination - message will be written is mounted into - the container''s filesystem. Message written - is intended to be brief final status, - such as an assertion failure message. - Will be truncated by the node if greater - than 4096 bytes. The total message length - across all containers will be limited - to 12kb. Defaults to /dev/termination-log. - Cannot be updated.' - type: string - terminationMessagePolicy: - description: Indicate how the termination - message should be populated. 
File will - use the contents of terminationMessagePath - to populate the container status message - on both success and failure. FallbackToLogsOnError - will use the last chunk of container log - output if the termination message file - is empty and the container exited with - an error. The log output is limited to - 2048 bytes or 80 lines, whichever is smaller. - Defaults to File. Cannot be updated. - type: string - tty: - description: Whether this container should - allocate a TTY for itself, also requires - 'stdin' to be true. Default is false. - type: boolean - volumeDevices: - description: volumeDevices is the list of - block devices to be used by the container. - items: - description: volumeDevice describes a - mapping of a raw block device within - a container. - properties: - devicePath: - description: devicePath is the path - inside of the container that the - device will be mapped to. - type: string - name: - description: name must match the name - of a persistentVolumeClaim in the - pod - type: string - required: - - devicePath - - name - type: object - type: array - volumeMounts: - description: Pod volumes to mount into the - container's filesystem. Cannot be updated. - items: - description: VolumeMount describes a mounting - of a Volume within a container. - properties: - mountPath: - description: Path within the container - at which the volume should be mounted. Must - not contain ':'. - type: string - mountPropagation: - description: mountPropagation determines - how mounts are propagated from the - host to container and the other - way around. When not set, MountPropagationNone - is used. This field is beta in 1.10. - type: string - name: - description: This must match the Name - of a Volume. - type: string - readOnly: - description: Mounted read-only if - true, read-write otherwise (false - or unspecified). Defaults to false. - type: boolean - subPath: - description: Path within the volume - from which the container's volume - should be mounted. 
Defaults to "" - (volume's root). - type: string - subPathExpr: - description: Expanded path within - the volume from which the container's - volume should be mounted. Behaves - similarly to SubPath but environment - variable references $(VAR_NAME) - are expanded using the container's - environment. Defaults to "" (volume's - root). SubPathExpr and SubPath are - mutually exclusive. - type: string - required: - - mountPath - - name - type: object - type: array - workingDir: - description: Container's working directory. - If not specified, the container runtime's - default will be used, which might be configured - in the container image. Cannot be updated. - type: string - required: - - name type: object type: array + x-kubernetes-preserve-unknown-fields: true timeout: description: Timeout defines the maximum amount of time Velero should wait for the initContainers diff --git a/config/crd/v1/bases/velero.io_volumesnapshotlocations.yaml b/config/crd/v1/bases/velero.io_volumesnapshotlocations.yaml index 56ef139d2..b47713497 100644 --- a/config/crd/v1/bases/velero.io_volumesnapshotlocations.yaml +++ b/config/crd/v1/bases/velero.io_volumesnapshotlocations.yaml @@ -13,6 +13,8 @@ spec: kind: VolumeSnapshotLocation listKind: VolumeSnapshotLocationList plural: volumesnapshotlocations + shortNames: + - vsl singular: volumesnapshotlocation scope: Namespaced versions: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 0b43e2f54..508a08421 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -24,6 +24,26 @@ rules: - pods verbs: - get +- apiGroups: + - velero.io + resources: + - backuprepositories + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - velero.io + resources: + - backuprepositories/status + verbs: + - get + - patch + - update - apiGroups: - velero.io resources: @@ -131,26 +151,6 @@ rules: - get - patch - update -- apiGroups: - - velero.io - resources: - - resticrepositories - verbs: - - create - - 
delete - - get - - list - - patch - - update - - watch -- apiGroups: - - velero.io - resources: - - resticrepositories/status - verbs: - - get - - patch - - update - apiGroups: - velero.io resources: @@ -191,3 +191,15 @@ rules: - get - patch - update +- apiGroups: + - velero.io + resources: + - volumesnapshotlocations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch diff --git a/design/unified-repo-and-kopia-integration/br-workflow.png b/design/unified-repo-and-kopia-integration/br-workflow.png new file mode 100644 index 000000000..6d935c7d0 Binary files /dev/null and b/design/unified-repo-and-kopia-integration/br-workflow.png differ diff --git a/design/unified-repo-and-kopia-integration/debug-log-repository.png b/design/unified-repo-and-kopia-integration/debug-log-repository.png new file mode 100644 index 000000000..640d12944 Binary files /dev/null and b/design/unified-repo-and-kopia-integration/debug-log-repository.png differ diff --git a/design/unified-repo-and-kopia-integration/debug-log-uploader.png b/design/unified-repo-and-kopia-integration/debug-log-uploader.png new file mode 100644 index 000000000..7dcd2bdcb Binary files /dev/null and b/design/unified-repo-and-kopia-integration/debug-log-uploader.png differ diff --git a/design/unified-repo-and-kopia-integration/maintenance-workflow.png b/design/unified-repo-and-kopia-integration/maintenance-workflow.png new file mode 100644 index 000000000..938eb45d2 Binary files /dev/null and b/design/unified-repo-and-kopia-integration/maintenance-workflow.png differ diff --git a/design/unified-repo-and-kopia-integration/progress-update.png b/design/unified-repo-and-kopia-integration/progress-update.png new file mode 100644 index 000000000..33305332b Binary files /dev/null and b/design/unified-repo-and-kopia-integration/progress-update.png differ diff --git a/design/unified-repo-and-kopia-integration/scope.png b/design/unified-repo-and-kopia-integration/scope.png new file mode 100644 index 
000000000..5ff97da03 Binary files /dev/null and b/design/unified-repo-and-kopia-integration/scope.png differ diff --git a/design/unified-repo-and-kopia-integration/snapshot-deletion-workflow.png b/design/unified-repo-and-kopia-integration/snapshot-deletion-workflow.png new file mode 100644 index 000000000..7a31a8057 Binary files /dev/null and b/design/unified-repo-and-kopia-integration/snapshot-deletion-workflow.png differ diff --git a/design/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md b/design/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md new file mode 100644 index 000000000..5b4a897e2 --- /dev/null +++ b/design/unified-repo-and-kopia-integration/unified-repo-and-kopia-integration.md @@ -0,0 +1,469 @@ +# Unified Repository & Kopia Integration Design + +## Glossary & Abbreviation + +**BR**: Backup & Restore +**Backup Storage**: The storage that meets BR requirements, for example, scalable, durable, cost-effective, etc., therefore, Backup Storage is usually implemented as Object storage or File System storage, it may be on-premise or in cloud. Backup Storage is not BR specific necessarily, so it usually doesn’t provide most of the BR related features. On the other hand, storage vendors may provide BR specific storages that include some BR features like deduplication, compression, encryption, etc. For a standalone BR solution (i.e. Velero), the Backup Storage is not part of the solution, it is provided by users, so the BR solution should not assume the BR related features are always available from the Backup Storage. +**Backup Repository**: Backup repository is layered between BR data movers and Backup Storage to provide BR related features. 
Backup Repository is a part of BR solution, so generally, BR solution by default leverages the Backup Repository to provide the features because Backup Repository is always available; when Backup Storage provides duplicated features, and the latter is more beneficial (i.e., performance is better), BR solution should have the ability to opt to use the Backup Storage’s implementation. +**Data Mover**: The BR module to read/write data from/to workloads, the aim is to eliminate the differences of workloads. +**TCO**: Total Cost of Ownership. This is a general criteria for products/solutions, but also means a lot for BR solutions. For example, this means what kind of backup storage (and its cost) it requires, the retention policy of backup copies, the ways to remove backup data redundancy, etc. +**RTO**: Recovery Time Objective. This is the duration of time that users’ business can recover after a disaster. + +## Background + +As a Kubernetes BR solution, Velero is pursuing the capability to back up data from the volatile and limited production environment into the durable, heterogeneous and scalable backup storage. This relies on two parts: + +- Move data from various production workloads. The data mover has this role. Depending on the type of workload, Velero needs different data movers. For example, file system data mover, block data mover, and data movers for specific applications. At present, Velero supports moving file system data from PVs through Restic, which plays the role of the File System Data Mover. +- Persist data in backup storage. For a BR solution, this is the responsibility of the backup repository. Specifically, the backup repository is required to: + - Efficiently save data so as to reduce TCO. For example, deduplicate and compress the data before saving it + - Securely save data so as to meet security criteria. 
For example, encrypt the data on rest, make the data immutable after backup, and detect/protect from ransomware + - Efficiently retrieve data during restore so as to meet RTO. For example, restore a small unit of data or data associated with a small span of time + - Effectively manage data from all kinds of data movers in all kinds of backup storage. This means 2 things: first, apparently, backup storages are different from each other; second, some data movers may save quite different data from others, for example, some data movers save a portion of the logical object for each backup and need to visit and manage the portions as an entire logic object, aka. incremental backup. The backup repository needs to provide unified functionalities to eliminate the differences from the both ends + - Provide scalabilities so that users could assign resources (CPU, memory, network, etc.) in a flexible way to the backup repository since backup repository contains resource consuming modules + +At present, Velero provides some of these capabilities by leveraging Restic (e.g., deduplication and encryption on rest). This means that in addition to being a data mover for file system level data, Restic also plays the role of a backup repository, albeit one that is incomplete and limited: + +- Restic is an inseparable unit made up of a file system data mover and a repository. This means that the repository capabilities are only available for Restic file system backup. We cannot provide the same capabilities to other data movers using Restic. +- The backup storage Velero supports through our Restic backup path depends on the storage Restic supports. As a result, if there is a requirement to introduce backup storage that Restic doesn’t support, we have no way to make it. +- There is no way to enhance or extend the repository capabilities, because of the same reason – Restic is an inseparable unit, we cannot insert one or more customized layers to make the enhancements and extensions. 
+ +Moreover, as reflected by user-reported issues, Restic seems to have many performance issues on both the file system data mover side and the repository side. + +On the other hand, based on a previous analysis and testing, we found that Kopia has better performance, with more features and more suitable to fulfill Velero’s repository targets (Kopia’s architecture divides modules more clearly according to their responsibilities, every module plays a complete role with clear interfaces. This makes it easier to take individual modules to Velero without losing critical functionalities). + +## Goals + +- Define a Unified Repository Interface that various data movers could interact with. This is for below purposes: + - All kinds of data movers acquire the same set of backup repository capabilities very easily + - Provide the possibility to plugin in different backup repositories/backup storages without affecting the upper layers + - Provide the possibility to plugin in modules between data mover and backup repository, so as to extend the repository capabilities + - Provide the possibility to scale the backup repository without affecting the upper layers +- Use Kopia repository to implement the Unified Repository +- Use Kopia uploader as the file system data mover for Pod Volume Backup +- Have Kopia uploader calling the Unified Repository Interface and save/retrieve data to/from the Unified Repository +- Make Kopia uploader generic enough to move any file system data so that other data movement cases could use it +- Use the existing logic or add new logic to manage the unified repository and Kopia uploader +- Preserve the legacy Restic path, this is for the consideration of backward compatibility + +## Non-Goals + +- The Unified Repository supports all kinds of data movers to save logic objects into it. 
How these logic objects are organized for a specific data mover (for example, how a volume’s block data is organized and represented by a unified repository object) should be included in the related data mover design. +- At present, Velero saves Kubernetes resources, backup metadata, debug logs separately. Eventually, we want to save them in the Unified Repository. How to organize these data into the Unified Repository should be included in a separate design. +- For PodVolume BR, this design focuses on the data path only, other parts beyond the data read/write and data persistency are irrelevant and kept unchanged. +- Kopia uploader is made generic enough to move any file system data. How it is integrated in other cases, is irrelevant to this design. Take CSI snapshot backup for example, how the snapshot is taken and exposed to Kopia uploader should be included in the related data mover design. +- The advanced modes of the Unified Repository, for example, backup repository/storage plugin, backup repository extension, etc. are not included in this design. We will have separate designs to cover them whenever necessary. + +## Architecture of Unified Repository + +Below shows the primary modules and their responsibilities: + +- Kopia uploader, as it has been well isolated, could move all file system data either from the production PV (as Velero’s PodVolume BR does), or from any kind of snapshot (i.e., CSI snapshot). +- Unified Repository Interface, data movers call the Unified Repository Interface to write/read data to/from the Unified Repository. +- Kopia repository layers, CAOS and CABS, work as the backup repository and expose the Kopia Repository interface. +- A Kopia Repository Library works as an adapter between Unified Repository Interface and Kopia Repository interface. Specifically, it implements Unified Repository Interface and calls Kopia Repository interface. +- At present, there is only one kind of backup repository -- Kopia Repository. 
If a new backup repository/storage is required, we need to create a new Library as an adapter to the Unified Repository Interface +- At present, the Kopia Repository works as a single piece in the same process of the caller, in future, we may run its CABS into a dedicated process or node. +- At present, we don’t have a requirement to extend the backup repository, if needed, an extra module could be added as an upper layer into the Unified Repository without changing the data movers. + +Neither Kopia uploader nor Kopia Repository is invoked through CLI, instead, they are invoked through code interfaces, because we need to do lots of customizations. + +The Unified Repository takes two kinds of data: +- Unified Repository Object: This is the user's logical data, for example, files/directories, blocks of a volume, data of a database, etc. +- Unified Repository Manifest: This could include all other data to maintain the object data, for example, snapshot information, etc. + +For Unified Repository Object/Manifest, a brief guidance to data movers are as below: +- Data movers treat the simple unit of data they recognize as an Object. For example, file system data movers treat a file or a directory as an Object; block data movers treat a volume as an Object. However, it is unnecessary that every data mover has a unique data format in the Unified Repository, to the opposite, it is recommended that data movers could share the data formats unless there is any reason not to, in this way, the data generated by one data mover could be used by other data movers. +- Data movers don't need to care about the differences between full and incremental backups regarding the data organization. 
Data movers always have full views of their objects, if an object is partially written, they use the object writer's Seek function to skip the unchanged parts +- Unified Repository may divide the data movers' logical Object into sub-objects or slices, or append internal metadata, but they are transparent to data movers +- Every Object has an unified identifier, in order to retrieve the Object later, data movers need to save the identifiers into the snapshot information. The snapshot information is saved as a Manifest. +- Manifests could hold any kind of small piece data in a K-V manner. Inside the backup repository, these kinds of data may be processed differently from Object data, but it is transparent to data movers. +- A Manifest also has an unified identifier, the Unified Repository provides the capabilities to list all the Manifests or a specified Manifest by its identifier, or a specified Manifest by its name, or a set of Manifests by their labels. + +![A Unified Repository Architecture](unified-repo.png) + +Velero by default uses the Unified Repository for all kinds of data movement, it is also able to integrate with other data movement paths from any party, for any purpose. Details are concluded as below: + +- Built-in Data Path: this is the default data movement path, which uses Velero built-in data movers to backup/restore workloads, the data is written to/read from the Unified Repository. +- Data Mover Replacement: Any party could write its own data movers and plug them into Velero. Meanwhile, these plugin data movers could also write/read data to/from Velero’s Unified Repository so that these data movers could expose the same capabilities that provided by the Unified Repository. In order to do this, the data mover providers need to call the Unified Repository Interface from inside their plugin data movers. 
+- Data Path Replacement: Some vendors may already have their own data movers and backup repository and they want to replace Velero’s entire data path (including data movers and backup repository). In this case, the providers only need to implement their plugin data movers, all the things downwards are a black box to Velero and managed by providers themselves (including API call, data transport, installation, life cycle management, etc.). Therefore, this case is out of the scope of Unified Repository. +![A Scope](scope.png) + +# Detailed Design + +## The Unified Repository Interface +Below are the definitions of the Unified Repository Interface. All the functions are synchronization functions. +``` +///BackupRepoService is used to initialize, open or maintain a backup repository +type BackupRepoService interface { + ///Create a backup repository or connect to an existing backup repository + ///repoOption: option to the backup repository and the underlying backup storage + ///createNew: indicates whether to create a new or connect to an existing backup repository + ///result: the backup repository specific output that could be used to open the backup repository later + Init(ctx context.Context, repoOption RepoOptions, createNew bool) error + + ///Open an backup repository that has been created/connected + ///repoOption: options to open the backup repository and the underlying storage + Open(ctx context.Context, repoOption RepoOptions) (BackupRepo, error) + + ///Periodically called to maintain the backup repository to eliminate redundant data and improve performance + ///repoOption: options to maintain the backup repository + Maintain(ctx context.Context, repoOption RepoOptions) error +} + +///BackupRepo provides the access to the backup repository +type BackupRepo interface { + ///Open an existing object for read + ///id: the object's unified identifier + OpenObject(ctx context.Context, id ID) (ObjectReader, error) + + ///Get a manifest data + GetManifest(ctx 
context.Context, id ID, mani *RepoManifest) error + + ///Get one or more manifest data that match the given labels + FindManifests(ctx context.Context, filter ManifestFilter) ([]*ManifestEntryMetadata, error) + + ///Create a new object and return the object's writer interface + ///return: A unified identifier of the object on success + NewObjectWriter(ctx context.Context, opt ObjectWriteOptions) ObjectWriter + + ///Save a manifest object + PutManifest(ctx context.Context, mani RepoManifest) (ID, error) + + ///Delete a manifest object + DeleteManifest(ctx context.Context, id ID) error + + ///Flush all the backup repository data + Flush(ctx context.Context) error + + ///Get the local time of the backup repository. It may be different from the time of the caller + Time() time.Time + + ///Close the backup repository + Close(ctx context.Context) error +} + +type ObjectReader interface { + io.ReadCloser + io.Seeker + + ///Length returns the logical size of the object + Length() int64 +} + +type ObjectWriter interface { + io.WriteCloser + + ///For some cases, i.e. 
block incremental, the object is not written sequentially + io.Seeker + + // Periodically called to preserve the state of data written to the repo so far + // Return a unified identifier that represent the current state + // An empty ID could be returned on success if the backup repository doesn't support this + Checkpoint() (ID, error) + + ///Wait for the completion of the object write + ///Result returns the object's unified identifier after the write completes + Result() (ID, error) +} +``` + +Some data structure & constants used by the interfaces: +``` +type RepoOptions struct { + ///A repository specific string to identify a backup storage, i.e., "s3", "filesystem" + StorageType string + ///Backup repository password, if any + RepoPassword string + ///A custom path to save the repository's configuration, if any + ConfigFilePath string + ///Other repository specific options + GeneralOptions map[string]string + ///Storage specific options + StorageOptions map[string]string +} + +///ObjectWriteOptions defines the options when creating an object for write +type ObjectWriteOptions struct { + FullPath string ///Full logical path of the object + Description string ///A description of the object, could be empty + Prefix ID ///A prefix of the name used to save the object + AccessMode int ///OBJECT_DATA_ACCESS_* + BackupMode int ///OBJECT_DATA_BACKUP_* +} + +const ( + ///Below consts defines the access mode when creating an object for write + OBJECT_DATA_ACCESS_MODE_UNKNOWN int = 0 + OBJECT_DATA_ACCESS_MODE_FILE int = 1 + OBJECT_DATA_ACCESS_MODE_BLOCK int = 2 + + OBJECT_DATA_BACKUP_MODE_UNKNOWN int = 0 + OBJECT_DATA_BACKUP_MODE_FULL int = 1 + OBJECT_DATA_BACKUP_MODE_INC int = 2 +) + +///ManifestEntryMetadata is the metadata describing one manifest data +type ManifestEntryMetadata struct { + ID ID ///The ID of the manifest data + Length int32 ///The data size of the manifest data + Labels map[string]string ///Labels saved together with the manifest data + ModTime 
time.Time ///Modified time of the manifest data +} + +type RepoManifest struct { + Payload interface{} ///The user data of manifest + Metadata *ManifestEntryMetadata ///The metadata data of manifest +} + +type ManifestFilter struct { + Labels map[string]string +} +``` + +## Workflow + +### Backup & Restore Workflow + +We preserve the bone of the existing BR workflow, that is: + +- Still use the Velero Server pod and VeleroNodeAgent daemonSet (originally called Restic daemonset) pods to hold the corresponding controllers and modules +- Still use the Backup/Restore CR and BackupRepository CR (originally called ResticRepository CR) to drive the BR workflow + +The modules in gray color in below diagram are the existing modules and with no significant changes. +In the new design, we will have separate and independent modules/logics for backup repository and uploader (data mover), specifically: + +- Repository Provider provides functionalities to manage the backup repository. For example, initialize a repository, connect to a repository, manage the snapshots in the repository, maintain a repository, etc. +- Uploader Provider provides functionalities to run a backup or restore. + +The Repository Provider and Uploader Provider use options to choose the path --- legacy path vs. new path (Kopia uploader + Unified Repository). Specifically, for legacy path, Repository Provider will manage Restic Repository only, otherwise, it manages Unified Repository only; for legacy path, Uploader Provider calls Restic to do the BR, otherwise, it calls Kopia uploader to do the BR. + +In order to manage Restic Repository, the Repository Provider calls Restic Repository Provider, the latter invokes the existing Restic CLIs. +In order to manage Unified Repository, the Repository Provider calls Unified Repository Provider, the latter calls the Unified Repository module through the udmrepo.BackupRepoService interface. It doesn’t know how the Unified Repository is implemented necessarily. 
+In order to use Restic to do BR, the Uploader Provider calls Restic Uploader Provider, the latter invokes the existing Restic CLIs. +In order to use Kopia to do BR, the Uploader Provider calls Kopia Uploader Provider, the latter do the following things: + +- Call Unified Repository through the udmrepo.BackupRepoService interface to open the unified repository for read/write. Again, it doesn’t know how the Unified Repository is implemented necessarily. It gets a BackupRepo’s read/write handle after the call succeeds +- Wrap the BackupRepo handle into a Kopia Shim which implements Kopia Repository interface +- Call the Kopia Uploader. Kopia Uploader is a Kopia module without any change, so it only understands Kopia Repository interface +- Kopia Uploader starts to backup/restore the corresponding PV’s file system data and write/read data to/from the provided Kopia Repository implementation, that is, Kopia Shim here +- When read/write calls go into Kopia Shim, it in turn calls the BackupRepo handle for read/write +- Finally, the read/write calls flow to Unified Repository module + +The Unified Repository provides all-in-one functionalities of a Backup Repository and exposes the Unified Repository Interface. Inside, Kopia Library is an adapter for Kopia Repository to translate the Unified Repository Interface calls to Kopia Repository interface calls. +Both Kopia Shim and Kopia Library rely on Kopia Repository interface, so we need to have some Kopia version control. We may need to change Kopia Shim and Kopia Library when upgrading Kopia to a new version and the Kopia Repository interface has some changes in the new version. +![A BR Workflow](br-workflow.png) +The modules in blue color in below diagram represent the newly added modules/logics or reorganized logics. +The modules in yellow color in below diagram represent the called Kopia modules without changes. 
+ +### Delete Snapshot Workflow +The Delete Snapshot workflow follows the similar manner with BR workflow, that is, we preserve the upper-level workflows until the calls reach to BackupDeletionController, then: +- Leverage Repository Provider to switch between Restic implementation and Unified Repository implementation in the same mechanism as BR +- For Restic implementation, the Restic Repository Provider invokes the existing “Forget” Restic CLI +- For Unified Repository implementation, the Unified Repository Provider calls udmrepo.BackupRepo’s DeleteManifest to delete a snapshot +![A Snapshot Deletion Workflow](snapshot-deletion-workflow.png) + +### Maintenance Workflow +Backup Repository/Backup Storage may need to periodically reorganize its data so that it could guarantee its QOS during the long-time service. Some Backup Repository/Backup Storage does this in background automatically, so the user doesn’t need to interfere; some others need the caller to explicitly call their maintenance interface periodically. Restic and Kopia both go with the second way, that is, Velero needs to periodically call their maintenance interface. +Velero already has an existing workflow to call Restic maintenance (it is called “Prune” in Restic, so Velero uses the same word). 
The existing workflow is as follows: +- The Prune is triggered at the time of the backup +- When a BackupRepository CR (originally called ResticRepository CR) is created by PodVolumeBackup/Restore Controller, the BackupRepository controller checks if it reaches the Prune Due Time, if so, it calls PruneRepo +- In the new design, the Repository Provider implements PruneRepo call, it uses the same way to switch between Restic Repository Provider and Unified Repository Provider, then: + - For Restic Repository, Restic Repository Provider invokes the existing “Prune” CLI of Restic + - For Unified Repository, Unified Repository Provider calls udmrepo.BackupRepoService’s Maintain function + +Kopia has two maintenance modes – the full maintenance and quick maintenance. There are many differences between full and quick mode, but briefly speaking, quick mode only processes the hottest data (primarily, it is the metadata and index data), so quick maintenance is much faster than full maintenance. On the other hand, quick maintenance also scatters the burden of full maintenance so that the full maintenance could finish quickly and make less impact. We will also take this quick maintenance into Velero. +We will add a new Due Time to Velero, finally, we have two Prune Due Time: +- Normal Due Time: For Restic, this will invoke Restic Prune; for Unified Repository, this will invoke udmrepo.BackupRepoService’s Maintain(full) call and finally call Kopia’s full maintenance +- Quick Due Time: For Restic, this does nothing; for Unified Repository, this will invoke udmrepo.BackupRepoService’s Maintain(quick) call and finally call Kopia’s quick maintenance + +We assign different values to Normal Due Time and Quick Due Time, as a result of which, the quick maintenance happens more frequently than full maintenance. 
+![A Maintenance Workflow](maintenance-workflow.png) + +### Progress Update +Because Kopia Uploader is an unchanged Kopia module, we need to find a way to get its progress during the BR. +Kopia Uploader accepts a Progress interface to update rich information during the BR, so the Kopia Uploader Provider will implement a Kopia’s Progress interface and then pass it to Kopia Uploader during its initialization. +In this way, Velero will be able to get the progress as shown in the diagram below. +![A Progress Update](progress-update.png) + +### Logs +In the current design, Velero is using two unchanged Kopia modules --- the Kopia Uploader and the Kopia Repository. Both will generate debug logs during their run. Velero will collect these logs in order to aid the debug. +Kopia’s Uploader and Repository both get the Logger information from the current GO Context, therefore, the Kopia Uploader Provider/Kopia Library could set the Logger interface into the current context and pass the context to Kopia Uploader/Kopia Repository. +Velero will set Logger interfaces separately for Kopia Uploader and Kopia Repository. In this way, the Unified Repository could serve other data movers without losing the debug log capability; and the Kopia Uploader could write to any repository without losing the debug log capability. +Kopia’s debug logs will be written to the same log file as Velero server or VeleroNodeAgent daemonset, so Velero doesn’t need to upload/download these debug logs separately. +![A Debug Log for Uploader](debug-log-uploader.png) +![A Debug Log for Repository](debug-log-repository.png) + +## Path Switch & Coexist +As mentioned above, There will be two paths. The related controllers need to identify the path during runtime and adjust its working mode. +According to the requirements, path changing is fulfilled at the backup/restore level. In order to let the controllers know the path, we need to add some option values. 
Specifically, there will be option/mode values for path selection in two places: +- Add the “uploader-type” option as a parameter of the Velero server. The parameters will be set by the installation. Currently the option has two values, either "restic" or "kopia" (in future, we may add other file system uploaders, then we will have more values). +- Add a "uploaderType" value in the PodVolume Backup/Restore CR and a "repositoryType" value in the BackupRepository CR. "uploaderType" currently has two values , either "restic" or "kopia"; "repositoryType" currently has two values, either "restic" or "kopia" (in future, the Unified Repository could opt among multiple backup repository/backup storage, so there may be more values. This is a good reason that repositoryType is a multivariate flag, however, in which way to opt among the backup repository/backup storage is not covered in this PR). If the values are missing in the CRs, it by default means "uploaderType=restic" and "repositoryType=restic", so the legacy CRs are handled correctly by Restic. + +The corresponding controllers handle the CRs by checking the CRs' path value. Some examples are as below: +- The PodVolume BR controller checks the "uploaderType" value from PodVolume CRs and decide its working path +- The BackupRepository controller checks the "repositoryType" value from BackupRepository CRs and decide its working path +- The Backup controller that runs in Velero server checks its “uploader-type” parameter to decide the path for the Backup it is going to create and then create the PodVolume Backup CR and BackupRepository CR +- The Restore controller checks the Backup, from which it is going to restore, for the path and then create the PodVolume Restore CR and BackupRepository CR + +As described above, the “uploader-type” parameter of the Velero server is only used to decide the path when creating a new Backup, for other cases, the path selection is driven by the related CRs. 
Therefore, we only need to add this parameter to the Velero server. + +## Velero CR Name Changes +We will change below CRs' name to make them more generic: +- "ResticRepository" CR to "BackupRepository" CR + +This means, we add a new CR type and deprecate the old one. As a result, if users upgrade from the old release, the old CRs will be orphaned, Velero will neither refer to it nor manage it, users need to delete these CRs manually. +As a side effect, when upgrading from an old release, even though the path is not changed, the BackupRepository gets created all the time, because Velero will not refer to the old CR's status. This seems to cause the repository to initialize more than once, however, it won't happen. In the BackupRepository controller, before initializing a repository, it always tries to connect to the repository first, if it is connectable, it won't do the initialization. +When backing up with the new release, Velero always creates BackupRepository CRs instead of ResticRepository CRs. +When restoring from an old backup, Velero always creates BackupRepository CRs instead of ResticRepository CRs. +When there are already backups or restores running during the upgrade, since after upgrade, the Velero server pods and VeleroNodeAgent daemonset pods are restarted, the existing backups/restores will fail immediately. + +## Storage Configuration +The backup repository needs some parameters to connect to various backup storage. For example, for a S3 compatible storage, the parameters may include bucket name, region, endpoint, etc. Different backup storage have totally different parameters. BackupRepository CRs, PodVolume Backup CRs and PodVolume Restore CRs save these parameters in their spec, as a string called repoIdentififer. The format of the string is for S3 storage only, it meets Restic CLI's requirements but is not enough for other backup repository. 
On the other hand, the parameters that are used to generate the repoIdentififer all come from the BackupStorageLocation. The latter has a map structure that could take parameters from any storage kind. +Therefore, for the new path, Velero uses the information in the BackupStorageLocation directly. That is, whenever Velero needs to initialize/connect to the Unified Repository, it acquires the storage configuration from the corresponding BackupStorageLocation. Then no more elements will be added in BackupRepository CRs, PodVolume Backup CRs or PodVolume Restore CRs. +The legacy path will be kept as is. That is, Velero still sets/gets the repoIdentififer in BackupRepository CRs, PodVolume Backup CRs and PodVolume Restore CRs and then passes to Restic CLI. + +## Installation + We will add a new flag "--pod-volume-backup-uploader" during installation. The flag has 3 meanings: + - It indicates PodVolume BR as the default method to protect PV data over other methods, i.e., durable snapshot. Therefore, the existing --use-restic option will be replaced + - It indicates the file system uploader to be used by PodVolume BR + - It implies the backup repository type manner, Restic if pod-volume-backup-uploader=restic, Unified Repository in all other cases + + The flag has below two values: + **"Restic"**: it means Velero will use Restic to do the pod volume backup. 
Therefore, the Velero server deployment will be created as below: + ``` + spec: + containers: + - args: + - server + - --features= + - --uploader-type=restic + command: + - /velero +``` +The BackupRepository CRs and PodVolume Backup/Restore CRs created in this case are as below: +``` +spec: + backupStorageLocation: default + maintenanceFrequency: 168h0m0s + repositoryType: restic + volumeNamespace: nginx-example +``` +``` +spec: + backupStorageLocation: default + node: aks-agentpool-27359964-vmss000000 + pod: + kind: Pod + name: nginx-stateful-0 + namespace: nginx-example + uid: 86aaec56-2b21-4736-9964-621047717133 + tags: + ... + uploaderType: restic + volume: nginx-log +``` +``` +spec: + backupStorageLocation: default + pod: + kind: Pod + name: nginx-stateful-0 + namespace: nginx-example + uid: e56d5872-3d94-4125-bfe8-8a222bf0fcf1 + snapshotID: 1741e5f1 + uploaderType: restic + volume: nginx-log +``` + **"Kopia"**: it means Velero will use Kopia uploader to do the pod volume backup (so it will use Unified Repository as the backup target). Therefore, the Velero server deployment will be created as below: + ``` + spec: + containers: + - args: + - server + - --features= + - --uploader-type=kopia + command: + - /velero +``` +The BackupRepository CRs created in this case are hard set with "kopia" at present, sice Kopia is the only option as a backup repository. The PodVolume Backup/Restore CRs are created with "kopia" as well: +``` +spec: + backupStorageLocation: default + maintenanceFrequency: 168h0m0s + repositoryType: kopia + volumeNamespace: nginx-example +``` +``` +spec: + backupStorageLocation: default + node: aks-agentpool-27359964-vmss000000 + pod: + kind: Pod + name: nginx-stateful-0 + namespace: nginx-example + uid: 86aaec56-2b21-4736-9964-621047717133 + tags: + ... 
+ uploaderType: kopia + volume: nginx-log +``` +``` +spec: + backupStorageLocation: default + pod: + kind: Pod + name: nginx-stateful-0 + namespace: nginx-example + uid: e56d5872-3d94-4125-bfe8-8a222bf0fcf1 + snapshotID: 1741e5f1 + uploaderType: kopia + volume: nginx-log +``` +We will add the flag for both CLI installation and Helm Chart Installation. Specifically: +- Helm Chart Installation: add the "--pod-volume-backup-uploader" flag into its value.yaml and then generate the deployments according to the value. Value.yaml is the user-provided configuration file, therefore, users could set this value at the time of installation. The changes in Value.yaml are as below: +``` + command: + - /velero + args: + - server + {{- with .Values.configuration }} + {{- if .pod-volume-backup-uploader "restic" }} + - --legacy + {{- end }} +``` +- CLI Installation: add the "--pod-volume-backup-uploader" flag into the installation command line, and then create the two deployments accordingly. Users could change the option at the time of installation. The CLI is as below: +```velero install --pod-volume-backup-uploader=restic``` +```velero install --pod-volume-backup-uploader=kopia``` + +## Upgrade +For upgrade, we allow users to change the path by specifying "--pod-volume-backup-uploader" flag in the same way as the fresh installation. Therefore, the flag change should be applied to the Velero server after upgrade. Additionally, we need to add a label to Velero server to indicate the current path, so as to provide an easy way to query it. +Moreover, if users upgrade from the old release, we need to change the existing Restic Daemonset name to VeleroNodeAgent daemonSet. The name change should be applied after upgrade. +The recommended way for upgrade is to modify the related Velero resource directly through kubectl, the above changes will be applied in the same way. We need to modify the Velero doc for all these changes. 
+ +## CLI +Below Velero CLI or its output needs some changes: +- ```Velero backup describe```: the output should indicate the path +- ```Velero restore describe```: the output should indicate the path +- ```Velero restic repo get```: the name of this CLI should be changed to a generic one, for example, "Velero repo get"; the output of this CLI should print all the backup repository if Restic repository and Unified Repository exist at the same time + +At present, we don't have a requirement for selecting the path during backup, so we don't change the ```Velero backup create``` CLI for now. If there is a requirement in future, we could simply add a flag similar to "--pod-volume-backup-uploader" to select the path. + +## CR Example +Below sample files demonstrate complete CRs with all the changes mentioned above: +- BackupRepository CR: https://gist.github.com/Lyndon-Li/f38ad69dd8c4785c046cd7ed0ef2b6ed#file-backup-repository-sample-yaml +- PodVolumeBackup CR: https://gist.github.com/Lyndon-Li/f38ad69dd8c4785c046cd7ed0ef2b6ed#file-pvb-sample-yaml +- PodVolumeRestore CR: https://gist.github.com/Lyndon-Li/f38ad69dd8c4785c046cd7ed0ef2b6ed#file-pvr-sample-yaml + +## User Perspective +This design aims to provide a flexible backup repository layer and a generic file system uploader, which are fundamental for PodVolume and other data movements. Although this will make Velero more capable, at present, we don't aim to expose differentiated features end to end. 
Specifically: +- By default, Velero still uses Restic for PodVolume BR +- Even when changing to the new path, Velero still allows users to restore from the data backed up by Restic +- The capability of PodVolume BR under the new path is kept the same as it under Restic path and the same as the existing PodVolume BR +- The operational experiences are kept the same as much as possible, the known changes are listed below + +Below user experiences are changed for this design: +- Installation CLI change: a new option is added to the installation CLI, see the Installation section for details +- CR change: One or more existing CRs have been renamed, see the Velero CR Changes section for details +- Velero CLI name and output change, see the CLI section for details +- Velero daemonset name change +- Wording Alignment: as the existing situation, many places are using the word of "Restic", for example, "default-volume-to-restic" option, most of them are not accurate anymore, we will change these words and give a detailed list of the changes \ No newline at end of file diff --git a/design/unified-repo-and-kopia-integration/unified-repo.png b/design/unified-repo-and-kopia-integration/unified-repo.png new file mode 100644 index 000000000..609f6db58 Binary files /dev/null and b/design/unified-repo-and-kopia-integration/unified-repo.png differ diff --git a/hack/restore-crd-patch-v1.json b/hack/restore-crd-patch-v1.json deleted file mode 100644 index b8b729d0d..000000000 --- a/hack/restore-crd-patch-v1.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - { "op": "replace", "path": "/spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/hooks/properties/resources/items/properties/postHooks/items/properties/init/properties/initContainers/items/properties/ports/items/required", "value": [ "containerPort", "protocol"] } -] diff --git a/hack/restore-crd-patch-v1beta1.json b/hack/restore-crd-patch-v1beta1.json deleted file mode 100644 index cbaa90a02..000000000 --- 
a/hack/restore-crd-patch-v1beta1.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - { "op": "replace", "path": "/spec/validation/openAPIV3Schema/properties/spec/properties/hooks/properties/resources/items/properties/postHooks/items/properties/init/properties/initContainers/items/properties/ports/items/required", "value": [ "containerPort", "protocol"] } -] diff --git a/hack/update-generated-crd-code.sh b/hack/update-generated-crd-code.sh index 70b9a942b..fb7be399e 100755 --- a/hack/update-generated-crd-code.sh +++ b/hack/update-generated-crd-code.sh @@ -47,7 +47,7 @@ ${GOPATH}/src/k8s.io/code-generator/generate-groups.sh \ # Generate apiextensions.k8s.io/v1 # Generate manifests e.g. CRD, RBAC etc. controller-gen \ - crd:crdVersions=v1\ + crd:crdVersions=v1 \ paths=./pkg/apis/velero/v1/... \ rbac:roleName=velero-perms \ paths=./pkg/controller/... \ @@ -55,13 +55,4 @@ controller-gen \ object \ paths=./pkg/apis/velero/v1/... -# this is a super hacky workaround for https://github.com/kubernetes/kubernetes/issues/91395 -# which a result of fixing the validation on CRD objects. The validation ensures the fields that are list map keys, are either marked -# as required or have default values to ensure merging of list map items work as expected. -# With "containerPort" and "protocol" being considered as x-kubernetes-list-map-keys in the container ports, and "protocol" was not -# a required field, the CRD would fail validation with errors similar to the one reported in https://github.com/kubernetes/kubernetes/issues/91395. -# once controller-gen (above) is able to generate CRDs with `protocol` as a required field, this hack can be removed. 
-kubectl patch -f config/crd/v1/bases/velero.io_restores.yaml -p "$(cat hack/restore-crd-patch-v1.json)" --type=json --local=true -o yaml > /tmp/velero.io_restores-yaml.patched -mv /tmp/velero.io_restores-yaml.patched config/crd/v1/bases/velero.io_restores.yaml - go generate ./config/crd/v1/crds diff --git a/internal/credentials/getter.go b/internal/credentials/getter.go new file mode 100644 index 000000000..c890c0566 --- /dev/null +++ b/internal/credentials/getter.go @@ -0,0 +1,24 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +// CredentialGetter is a collection of interfaces for interacting with credentials +// that are stored in different targets +type CredentialGetter struct { + FromFile FileStore + FromSecret SecretStore +} diff --git a/internal/credentials/mocks/FileStore.go b/internal/credentials/mocks/FileStore.go new file mode 100644 index 000000000..3fce9e843 --- /dev/null +++ b/internal/credentials/mocks/FileStore.go @@ -0,0 +1,49 @@ +// Code generated by mockery v2.14.0. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + v1 "k8s.io/api/core/v1" +) + +// FileStore is an autogenerated mock type for the FileStore type +type FileStore struct { + mock.Mock +} + +// Path provides a mock function with given fields: selector +func (_m *FileStore) Path(selector *v1.SecretKeySelector) (string, error) { + ret := _m.Called(selector) + + var r0 string + if rf, ok := ret.Get(0).(func(*v1.SecretKeySelector) string); ok { + r0 = rf(selector) + } else { + r0 = ret.Get(0).(string) + } + + var r1 error + if rf, ok := ret.Get(1).(func(*v1.SecretKeySelector) error); ok { + r1 = rf(selector) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type mockConstructorTestingTNewFileStore interface { + mock.TestingT + Cleanup(func()) +} + +// NewFileStore creates a new instance of FileStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewFileStore(t mockConstructorTestingTNewFileStore) *FileStore { + mock := &FileStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/credentials/mocks/SecretStore.go b/internal/credentials/mocks/SecretStore.go new file mode 100644 index 000000000..5494511c9 --- /dev/null +++ b/internal/credentials/mocks/SecretStore.go @@ -0,0 +1,49 @@ +// Code generated by mockery v2.14.0. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + v1 "k8s.io/api/core/v1" +) + +// SecretStore is an autogenerated mock type for the SecretStore type +type SecretStore struct { + mock.Mock +} + +// Get provides a mock function with given fields: selector +func (_m *SecretStore) Get(selector *v1.SecretKeySelector) (string, error) { + ret := _m.Called(selector) + + var r0 string + if rf, ok := ret.Get(0).(func(*v1.SecretKeySelector) string); ok { + r0 = rf(selector) + } else { + r0 = ret.Get(0).(string) + } + + var r1 error + if rf, ok := ret.Get(1).(func(*v1.SecretKeySelector) error); ok { + r1 = rf(selector) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type mockConstructorTestingTNewSecretStore interface { + mock.TestingT + Cleanup(func()) +} + +// NewSecretStore creates a new instance of SecretStore. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +func NewSecretStore(t mockConstructorTestingTNewSecretStore) *SecretStore { + mock := &SecretStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/credentials/secret_store.go b/internal/credentials/secret_store.go new file mode 100644 index 000000000..f4d2111a5 --- /dev/null +++ b/internal/credentials/secret_store.go @@ -0,0 +1,56 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package credentials + +import ( + "github.com/pkg/errors" + corev1api "k8s.io/api/core/v1" + kbclient "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/vmware-tanzu/velero/pkg/util/kube" +) + +// SecretStore defines operations for interacting with credentials +// that are stored in Secret. +type SecretStore interface { + // Get returns the secret key defined by the given selector + Get(selector *corev1api.SecretKeySelector) (string, error) +} + +type namespacedSecretStore struct { + client kbclient.Client + namespace string +} + +// NewNamespacedSecretStore returns a SecretStore which can interact with credentials +// for the given namespace. +func NewNamespacedSecretStore(client kbclient.Client, namespace string) (SecretStore, error) { + return &namespacedSecretStore{ + client: client, + namespace: namespace, + }, nil +} + +// Buffer returns the secret key defined by the given selector. +func (n *namespacedSecretStore) Get(selector *corev1api.SecretKeySelector) (string, error) { + creds, err := kube.GetSecretKey(n.client, n.namespace, selector) + if err != nil { + return "", errors.Wrap(err, "unable to get key for secret") + } + + return string(creds), nil +} diff --git a/internal/hook/item_hook_handler.go b/internal/hook/item_hook_handler.go index 96cb18bc6..83c756bd5 100644 --- a/internal/hook/item_hook_handler.go +++ b/internal/hook/item_hook_handler.go @@ -131,10 +131,10 @@ func (i *InitContainerRestoreHookHandler) HandleRestoreHooks( pod.Spec.InitContainers = pod.Spec.InitContainers[1:] } - hooksFromAnnotations := getInitRestoreHookFromAnnotation(kube.NamespaceAndName(pod), metadata.GetAnnotations(), log) - if hooksFromAnnotations != nil { + initContainerFromAnnotations := getInitContainerFromAnnotation(kube.NamespaceAndName(pod), metadata.GetAnnotations(), log) + if initContainerFromAnnotations != nil { log.Infof("Handling InitRestoreHooks from pod annotations") - initContainers = append(initContainers, 
hooksFromAnnotations.InitContainers...) + initContainers = append(initContainers, *initContainerFromAnnotations) } else { log.Infof("Handling InitRestoreHooks from RestoreSpec") // pod did not have the annotations appropriate for restore hooks @@ -155,7 +155,22 @@ func (i *InitContainerRestoreHookHandler) HandleRestoreHooks( } for _, hook := range rh.RestoreHooks { if hook.Init != nil { - initContainers = append(initContainers, hook.Init.InitContainers...) + containers := make([]corev1api.Container, 0) + for _, raw := range hook.Init.InitContainers { + container := corev1api.Container{} + err := ValidateContainer(raw.Raw) + if err != nil { + log.Errorf("invalid Restore Init hook: %s", err.Error()) + return nil, err + } + err = json.Unmarshal(raw.Raw, &container) + if err != nil { + log.Errorf("fail to Unmarshal hook Init into container: %s", err.Error()) + return nil, errors.WithStack(err) + } + containers = append(containers, container) + } + initContainers = append(initContainers, containers...) 
} } } @@ -350,7 +365,7 @@ type ResourceRestoreHook struct { RestoreHooks []velerov1api.RestoreResourceHook } -func getInitRestoreHookFromAnnotation(podName string, annotations map[string]string, log logrus.FieldLogger) *velerov1api.InitRestoreHook { +func getInitContainerFromAnnotation(podName string, annotations map[string]string, log logrus.FieldLogger) *corev1api.Container { containerImage := annotations[podRestoreHookInitContainerImageAnnotationKey] containerName := annotations[podRestoreHookInitContainerNameAnnotationKey] command := annotations[podRestoreHookInitContainerCommandAnnotationKey] @@ -373,15 +388,13 @@ func getInitRestoreHookFromAnnotation(podName string, annotations map[string]str log.Infof("Pod %s has no %s annotation, using generated name %s for initContainer", podName, podRestoreHookInitContainerNameAnnotationKey, containerName) } - return &velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - { - Image: containerImage, - Name: containerName, - Command: parseStringToCommand(command), - }, - }, + initContainer := corev1api.Container{ + Image: containerImage, + Name: containerName, + Command: parseStringToCommand(command), } + + return &initContainer } // GetRestoreHooksFromSpec returns a list of ResourceRestoreHooks from the restore Spec. @@ -406,7 +419,7 @@ func GetRestoreHooksFromSpec(hooksSpec *velerov1api.RestoreHooks) ([]ResourceRes if rs.LabelSelector != nil { ls, err := metav1.LabelSelectorAsSelector(rs.LabelSelector) if err != nil { - return nil, errors.WithStack(err) + return []ResourceRestoreHook{}, errors.WithStack(err) } rh.Selector.LabelSelector = ls } @@ -526,3 +539,17 @@ func GroupRestoreExecHooks( return byContainer, nil } + +// ValidateContainer validate whether a map contains mandatory k8s Container fields. +// mandatory fields include name, image and commands. 
+func ValidateContainer(raw []byte) error { + container := corev1api.Container{} + err := json.Unmarshal(raw, &container) + if err != nil { + return err + } + if len(container.Command) <= 0 || len(container.Name) <= 0 || len(container.Image) <= 0 { + return fmt.Errorf("invalid InitContainer in restore hook, it doesn't have Command, Name or Image field") + } + return nil +} diff --git a/internal/hook/item_hook_handler_test.go b/internal/hook/item_hook_handler_test.go index 267c413a1..9f8267808 100644 --- a/internal/hook/item_hook_handler_test.go +++ b/internal/hook/item_hook_handler_test.go @@ -1191,11 +1191,11 @@ func TestGroupRestoreExecHooks(t *testing.T) { } } -func TestGetInitRestoreHookFromAnnotations(t *testing.T) { +func TestGetInitContainerFromAnnotations(t *testing.T) { testCases := []struct { name string inputAnnotations map[string]string - expected velerov1api.InitRestoreHook + expected *corev1api.Container expectNil bool }{ { @@ -1223,12 +1223,8 @@ func TestGetInitRestoreHookFromAnnotations(t *testing.T) { podRestoreHookInitContainerNameAnnotationKey: "", podRestoreHookInitContainerCommandAnnotationKey: "/usr/bin/data-populator /user-data full", }, - expected: velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("restore-init1", "busy-box"). - Command([]string{"/usr/bin/data-populator /user-data full"}).Result(), - }, - }, + expected: builder.ForContainer("restore-init1", "busy-box"). + Command([]string{"/usr/bin/data-populator /user-data full"}).Result(), }, { name: "should generate container name when container name is missing", @@ -1237,22 +1233,14 @@ func TestGetInitRestoreHookFromAnnotations(t *testing.T) { podRestoreHookInitContainerImageAnnotationKey: "busy-box", podRestoreHookInitContainerCommandAnnotationKey: "/usr/bin/data-populator /user-data full", }, - expected: velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("restore-init1", "busy-box"). 
- Command([]string{"/usr/bin/data-populator /user-data full"}).Result(), - }, - }, + expected: builder.ForContainer("restore-init1", "busy-box"). + Command([]string{"/usr/bin/data-populator /user-data full"}).Result(), }, { name: "should return expected init container when all annotations are specified", expectNil: false, - expected: velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("restore-init1", "busy-box"). - Command([]string{"/usr/bin/data-populator /user-data full"}).Result(), - }, - }, + expected: builder.ForContainer("restore-init1", "busy-box"). + Command([]string{"/usr/bin/data-populator /user-data full"}).Result(), inputAnnotations: map[string]string{ podRestoreHookInitContainerImageAnnotationKey: "busy-box", podRestoreHookInitContainerNameAnnotationKey: "restore-init", @@ -1262,12 +1250,8 @@ func TestGetInitRestoreHookFromAnnotations(t *testing.T) { { name: "should return expected init container when all annotations are specified with command as a JSON array", expectNil: false, - expected: velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("restore-init1", "busy-box"). - Command([]string{"a", "b", "c"}).Result(), - }, - }, + expected: builder.ForContainer("restore-init1", "busy-box"). + Command([]string{"a", "b", "c"}).Result(), inputAnnotations: map[string]string{ podRestoreHookInitContainerImageAnnotationKey: "busy-box", podRestoreHookInitContainerNameAnnotationKey: "restore-init", @@ -1277,12 +1261,8 @@ func TestGetInitRestoreHookFromAnnotations(t *testing.T) { { name: "should return expected init container when all annotations are specified with command as malformed a JSON array", expectNil: false, - expected: velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("restore-init1", "busy-box"). - Command([]string{"[foobarbaz"}).Result(), - }, - }, + expected: builder.ForContainer("restore-init1", "busy-box"). 
+ Command([]string{"[foobarbaz"}).Result(), inputAnnotations: map[string]string{ podRestoreHookInitContainerImageAnnotationKey: "busy-box", podRestoreHookInitContainerNameAnnotationKey: "restore-init", @@ -1293,15 +1273,14 @@ func TestGetInitRestoreHookFromAnnotations(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - actual := getInitRestoreHookFromAnnotation("test/pod1", tc.inputAnnotations, velerotest.NewLogger()) + actualInitContainer := getInitContainerFromAnnotation("test/pod1", tc.inputAnnotations, velerotest.NewLogger()) if tc.expectNil { - assert.Nil(t, actual) + assert.Nil(t, actualInitContainer) return } - assert.NotEmpty(t, actual.InitContainers[0].Name) - assert.Equal(t, len(tc.expected.InitContainers), len(actual.InitContainers)) - assert.Equal(t, tc.expected.InitContainers[0].Image, actual.InitContainers[0].Image) - assert.Equal(t, tc.expected.InitContainers[0].Command, actual.InitContainers[0].Command) + assert.NotEmpty(t, actualInitContainer.Name) + assert.Equal(t, tc.expected.Image, actualInitContainer.Image) + assert.Equal(t, tc.expected.Command, actualInitContainer.Command) }) } } @@ -1347,11 +1326,11 @@ func TestGetRestoreHooksFromSpec(t *testing.T) { PostHooks: []velerov1api.RestoreResourceHook{ { Init: &velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("restore-init1", "busy-box"). - Command([]string{"foobarbaz"}).Result(), - *builder.ForContainer("restore-init2", "busy-box"). - Command([]string{"foobarbaz"}).Result(), + InitContainers: []runtime.RawExtension{ + builder.ForContainer("restore-init1", "busy-box"). + Command([]string{"foobarbaz"}).ResultRawExtension(), + builder.ForContainer("restore-init2", "busy-box"). 
+ Command([]string{"foobarbaz"}).ResultRawExtension(), }, }, }, @@ -1369,11 +1348,11 @@ func TestGetRestoreHooksFromSpec(t *testing.T) { RestoreHooks: []velerov1api.RestoreResourceHook{ { Init: &velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("restore-init1", "busy-box"). - Command([]string{"foobarbaz"}).Result(), - *builder.ForContainer("restore-init2", "busy-box"). - Command([]string{"foobarbaz"}).Result(), + InitContainers: []runtime.RawExtension{ + builder.ForContainer("restore-init1", "busy-box"). + Command([]string{"foobarbaz"}).ResultRawExtension(), + builder.ForContainer("restore-init2", "busy-box"). + Command([]string{"foobarbaz"}).ResultRawExtension(), }, }, }, @@ -1539,9 +1518,9 @@ func TestHandleRestoreHooks(t *testing.T) { RestoreHooks: []velerov1api.RestoreResourceHook{ { Init: &velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("should-not exist", "does-not-matter"). - Command([]string{""}).Result(), + InitContainers: []runtime.RawExtension{ + builder.ForContainer("should-not exist", "does-not-matter"). + Command([]string{""}).ResultRawExtension(), }, }, }, @@ -1556,6 +1535,9 @@ func TestHandleRestoreHooks(t *testing.T) { Name: "app1", Namespace: "default", }, + Spec: corev1api.PodSpec{ + InitContainers: []corev1api.Container{}, + }, }, expectedError: nil, expectedPod: &corev1api.Pod{ @@ -1582,11 +1564,11 @@ func TestHandleRestoreHooks(t *testing.T) { RestoreHooks: []velerov1api.RestoreResourceHook{ { Init: &velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("restore-init-container-1", "nginx"). - Command([]string{"a", "b", "c"}).Result(), - *builder.ForContainer("restore-init-container-2", "nginx"). - Command([]string{"a", "b", "c"}).Result(), + InitContainers: []runtime.RawExtension{ + builder.ForContainer("restore-init-container-1", "nginx"). 
+ Command([]string{"a", "b", "c"}).ResultRawExtension(), + builder.ForContainer("restore-init-container-2", "nginx"). + Command([]string{"a", "b", "c"}).ResultRawExtension(), }, }, }, @@ -1643,11 +1625,11 @@ func TestHandleRestoreHooks(t *testing.T) { RestoreHooks: []velerov1api.RestoreResourceHook{ { Init: &velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("restore-init-container-1", "nginx"). - Command([]string{"a", "b", "c"}).Result(), - *builder.ForContainer("restore-init-container-2", "nginx"). - Command([]string{"a", "b", "c"}).Result(), + InitContainers: []runtime.RawExtension{ + builder.ForContainer("restore-init-container-1", "nginx"). + Command([]string{"a", "b", "c"}).ResultRawExtension(), + builder.ForContainer("restore-init-container-2", "nginx"). + Command([]string{"a", "b", "c"}).ResultRawExtension(), }, }, }, @@ -1680,11 +1662,11 @@ func TestHandleRestoreHooks(t *testing.T) { RestoreHooks: []velerov1api.RestoreResourceHook{ { Init: &velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("restore-init-container-1", "nginx"). - Command([]string{"a", "b", "c"}).Result(), - *builder.ForContainer("restore-init-container-2", "nginx"). - Command([]string{"a", "b", "c"}).Result(), + InitContainers: []runtime.RawExtension{ + builder.ForContainer("restore-init-container-1", "nginx"). + Command([]string{"a", "b", "c"}).ResultRawExtension(), + builder.ForContainer("restore-init-container-2", "nginx"). + Command([]string{"a", "b", "c"}).ResultRawExtension(), }, }, }, @@ -1733,11 +1715,11 @@ func TestHandleRestoreHooks(t *testing.T) { RestoreHooks: []velerov1api.RestoreResourceHook{ { Init: &velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("restore-init-container-1", "nginx"). - Command([]string{"a", "b", "c"}).Result(), - *builder.ForContainer("restore-init-container-2", "nginx"). 
- Command([]string{"a", "b", "c"}).Result(), + InitContainers: []runtime.RawExtension{ + builder.ForContainer("restore-init-container-1", "nginx"). + Command([]string{"a", "b", "c"}).ResultRawExtension(), + builder.ForContainer("restore-init-container-2", "nginx"). + Command([]string{"a", "b", "c"}).ResultRawExtension(), }, }, }, @@ -1795,11 +1777,11 @@ func TestHandleRestoreHooks(t *testing.T) { RestoreHooks: []velerov1api.RestoreResourceHook{ { Init: &velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("restore-init-container-1", "nginx"). - Command([]string{"a", "b", "c"}).Result(), - *builder.ForContainer("restore-init-container-2", "nginx"). - Command([]string{"a", "b", "c"}).Result(), + InitContainers: []runtime.RawExtension{ + builder.ForContainer("restore-init-container-1", "nginx"). + Command([]string{"a", "b", "c"}).ResultRawExtension(), + builder.ForContainer("restore-init-container-2", "nginx"). + Command([]string{"a", "b", "c"}).ResultRawExtension(), }, }, }, @@ -1868,9 +1850,9 @@ func TestHandleRestoreHooks(t *testing.T) { RestoreHooks: []velerov1api.RestoreResourceHook{ { Init: &velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("restore-init-container-1", "nginx"). - Command([]string{"a", "b", "c"}).Result(), + InitContainers: []runtime.RawExtension{ + builder.ForContainer("restore-init-container-1", "nginx"). + Command([]string{"a", "b", "c"}).ResultRawExtension(), }, }, }, @@ -1911,9 +1893,9 @@ func TestHandleRestoreHooks(t *testing.T) { RestoreHooks: []velerov1api.RestoreResourceHook{ { Init: &velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("restore-init-container-1", "nginx"). - Command([]string{"a", "b", "c"}).Result(), + InitContainers: []runtime.RawExtension{ + builder.ForContainer("restore-init-container-1", "nginx"). 
+ Command([]string{"a", "b", "c"}).ResultRawExtension(), }, }, }, @@ -1922,6 +1904,37 @@ func TestHandleRestoreHooks(t *testing.T) { }, namespaceMapping: map[string]string{"default": "new"}, }, + { + name: "Invalid InitContainer in Restore hook should return nil as pod, and error.", + podInput: corev1api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "app1", + Namespace: "new", + }, + Spec: corev1api.PodSpec{}, + }, + expectedError: fmt.Errorf("invalid InitContainer in restore hook, it doesn't have Command, Name or Image field"), + expectedPod: nil, + restoreHooks: []ResourceRestoreHook{ + { + Name: "hook1", + Selector: ResourceHookSelector{ + Namespaces: collections.NewIncludesExcludes().Includes("new"), + Resources: collections.NewIncludesExcludes().Includes(kuberesource.Pods.Resource), + }, + RestoreHooks: []velerov1api.RestoreResourceHook{ + { + Init: &velerov1api.InitRestoreHook{ + InitContainers: []runtime.RawExtension{ + builder.ForContainer("restore-init-container-1", "nginx"). + ResultRawExtension(), + }, + }, + }, + }, + }, + }, + }, } for _, tc := range testCases { @@ -1931,10 +1944,32 @@ func TestHandleRestoreHooks(t *testing.T) { assert.NoError(t, err) actual, err := handler.HandleRestoreHooks(velerotest.NewLogger(), kuberesource.Pods, &unstructured.Unstructured{Object: podMap}, tc.restoreHooks, tc.namespaceMapping) assert.Equal(t, tc.expectedError, err) - actualPod := new(corev1api.Pod) - err = runtime.DefaultUnstructuredConverter.FromUnstructured(actual.UnstructuredContent(), actualPod) - assert.NoError(t, err) - assert.Equal(t, tc.expectedPod, actualPod) + if actual != nil { + actualPod := new(corev1api.Pod) + err = runtime.DefaultUnstructuredConverter.FromUnstructured(actual.UnstructuredContent(), actualPod) + assert.NoError(t, err) + assert.Equal(t, tc.expectedPod, actualPod) + } }) } } + +func TestValidateContainer(t *testing.T) { + valid := `{"name": "test", "image": "busybox", "command": ["pwd"]}` + noName := `{"image": "busybox", "command": 
["pwd"]}` + noImage := `{"name": "test", "command": ["pwd"]}` + noCommand := `{"name": "test", "image": "busybox"}` + expectedError := fmt.Errorf("invalid InitContainer in restore hook, it doesn't have Command, Name or Image field") + + // valid string should return nil as result. + assert.Equal(t, nil, ValidateContainer([]byte(valid))) + + // noName string should return expected error as result. + assert.Equal(t, expectedError, ValidateContainer([]byte(noName))) + + // noImage string should return expected error as result. + assert.Equal(t, expectedError, ValidateContainer([]byte(noImage))) + + // noCommand string should return expected error as result. + assert.Equal(t, expectedError, ValidateContainer([]byte(noCommand))) +} diff --git a/pkg/apis/velero/v1/restic_repository_types.go b/pkg/apis/velero/v1/backup_repository_types.go similarity index 64% rename from pkg/apis/velero/v1/restic_repository_types.go rename to pkg/apis/velero/v1/backup_repository_types.go index 8e315592f..300ecae9c 100644 --- a/pkg/apis/velero/v1/restic_repository_types.go +++ b/pkg/apis/velero/v1/backup_repository_types.go @@ -20,9 +20,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// ResticRepositorySpec is the specification for a ResticRepository. -type ResticRepositorySpec struct { - // VolumeNamespace is the namespace this restic repository contains +// BackupRepositorySpec is the specification for a BackupRepository. +type BackupRepositorySpec struct { + // VolumeNamespace is the namespace this backup repository contains // pod volume backups for. VolumeNamespace string `json:"volumeNamespace"` @@ -30,6 +30,11 @@ type ResticRepositorySpec struct { // that should contain this repository. 
BackupStorageLocation string `json:"backupStorageLocation"` + // RepositoryType indicates the type of the backend repository + // +kubebuilder:validation:Enum=kopia;restic;"" + // +optional + RepositoryType string `json:"repositoryType"` + // ResticIdentifier is the full restic-compatible string for identifying // this repository. ResticIdentifier string `json:"resticIdentifier"` @@ -38,23 +43,23 @@ type ResticRepositorySpec struct { MaintenanceFrequency metav1.Duration `json:"maintenanceFrequency"` } -// ResticRepositoryPhase represents the lifecycle phase of a ResticRepository. +// BackupRepositoryPhase represents the lifecycle phase of a BackupRepository. // +kubebuilder:validation:Enum=New;Ready;NotReady -type ResticRepositoryPhase string +type BackupRepositoryPhase string const ( - ResticRepositoryPhaseNew ResticRepositoryPhase = "New" - ResticRepositoryPhaseReady ResticRepositoryPhase = "Ready" - ResticRepositoryPhaseNotReady ResticRepositoryPhase = "NotReady" + BackupRepositoryPhaseNew BackupRepositoryPhase = "New" + BackupRepositoryPhaseReady BackupRepositoryPhase = "Ready" + BackupRepositoryPhaseNotReady BackupRepositoryPhase = "NotReady" ) -// ResticRepositoryStatus is the current status of a ResticRepository. -type ResticRepositoryStatus struct { - // Phase is the current state of the ResticRepository. +// BackupRepositoryStatus is the current status of a BackupRepository. +type BackupRepositoryStatus struct { + // Phase is the current state of the BackupRepository. // +optional - Phase ResticRepositoryPhase `json:"phase,omitempty"` + Phase BackupRepositoryPhase `json:"phase,omitempty"` - // Message is a message about the current status of the ResticRepository. + // Message is a message about the current status of the BackupRepository. 
// +optional Message string `json:"message,omitempty"` @@ -72,33 +77,35 @@ type ResticRepositoryStatus struct { // +kubebuilder:object:generate=true // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:printcolumn:name="Repository Type",type="string",JSONPath=".spec.repositoryType" +// -type ResticRepository struct { +type BackupRepository struct { metav1.TypeMeta `json:",inline"` // +optional metav1.ObjectMeta `json:"metadata,omitempty"` // +optional - Spec ResticRepositorySpec `json:"spec,omitempty"` + Spec BackupRepositorySpec `json:"spec,omitempty"` // +optional - Status ResticRepositoryStatus `json:"status,omitempty"` + Status BackupRepositoryStatus `json:"status,omitempty"` } // TODO(2.0) After converting all resources to use the runtime-controller client, // the k8s:deepcopy marker will no longer be needed and should be removed. // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true -// +kubebuilder:rbac:groups=velero.io,resources=resticrepositories,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=velero.io,resources=resticrepositories/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=velero.io,resources=backuprepositories,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=velero.io,resources=backuprepositories/status,verbs=get;update;patch -// ResticRepositoryList is a list of ResticRepositories. -type ResticRepositoryList struct { +// BackupRepositoryList is a list of BackupRepositories. 
+type BackupRepositoryList struct { metav1.TypeMeta `json:",inline"` // +optional metav1.ListMeta `json:"metadata,omitempty"` - Items []ResticRepository `json:"items"` + Items []BackupRepository `json:"items"` } diff --git a/pkg/apis/velero/v1/pod_volume_backup_types.go b/pkg/apis/velero/v1/pod_volume_backup_types.go index c532f096d..d34e09f6c 100644 --- a/pkg/apis/velero/v1/pod_volume_backup_types.go +++ b/pkg/apis/velero/v1/pod_volume_backup_types.go @@ -34,12 +34,17 @@ type PodVolumeBackupSpec struct { Volume string `json:"volume"` // BackupStorageLocation is the name of the backup storage location - // where the restic repository is stored. + // where the backup repository is stored. BackupStorageLocation string `json:"backupStorageLocation"` - // RepoIdentifier is the restic repository identifier. + // RepoIdentifier is the backup repository identifier. RepoIdentifier string `json:"repoIdentifier"` + // UploaderType is the type of the uploader to handle the data transfer. + // +kubebuilder:validation:Enum=kopia;restic;"" + // +optional + UploaderType string `json:"uploaderType"` + // Tags are a map of key-value pairs that should be applied to the // volume backup as tags. 
// +optional @@ -107,7 +112,8 @@ type PodVolumeBackupStatus struct { // +kubebuilder:printcolumn:name="Namespace",type="string",JSONPath=".spec.pod.namespace",description="Namespace of the pod containing the volume to be backed up" // +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.pod.name",description="Name of the pod containing the volume to be backed up" // +kubebuilder:printcolumn:name="Volume",type="string",JSONPath=".spec.volume",description="Name of the volume to be backed up" -// +kubebuilder:printcolumn:name="Restic Repo",type="string",JSONPath=".spec.repoIdentifier",description="Restic repository identifier for this backup" +// +kubebuilder:printcolumn:name="Repository ID",type="string",JSONPath=".spec.repoIdentifier",description="Backup repository identifier for this backup" +// +kubebuilder:printcolumn:name="Uploader Type",type="string",JSONPath=".spec.uploaderType",description="The type of the uploader to handle data transfer" // +kubebuilder:printcolumn:name="Storage Location",type="string",JSONPath=".spec.backupStorageLocation",description="Name of the Backup Storage Location where this backup should be stored" // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" // +kubebuilder:object:root=true diff --git a/pkg/apis/velero/v1/pod_volume_restore_type.go b/pkg/apis/velero/v1/pod_volume_restore_type.go index 45bca8e24..e0370da63 100644 --- a/pkg/apis/velero/v1/pod_volume_restore_type.go +++ b/pkg/apis/velero/v1/pod_volume_restore_type.go @@ -30,12 +30,17 @@ type PodVolumeRestoreSpec struct { Volume string `json:"volume"` // BackupStorageLocation is the name of the backup storage location - // where the restic repository is stored. + // where the backup repository is stored. BackupStorageLocation string `json:"backupStorageLocation"` - // RepoIdentifier is the restic repository identifier. + // RepoIdentifier is the backup repository identifier. 
RepoIdentifier string `json:"repoIdentifier"` + // UploaderType is the type of the uploader to handle the data transfer. + // +kubebuilder:validation:Enum=kopia;restic;"" + // +optional + UploaderType string `json:"uploaderType"` + // SnapshotID is the ID of the volume snapshot to be restored. SnapshotID string `json:"snapshotID"` } @@ -89,6 +94,7 @@ type PodVolumeRestoreStatus struct { // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="Namespace",type="string",JSONPath=".spec.pod.namespace",description="Namespace of the pod containing the volume to be restored" // +kubebuilder:printcolumn:name="Pod",type="string",JSONPath=".spec.pod.name",description="Name of the pod containing the volume to be restored" +// +kubebuilder:printcolumn:name="Uploader Type",type="string",JSONPath=".spec.uploaderType",description="The type of the uploader to handle data transfer" // +kubebuilder:printcolumn:name="Volume",type="string",JSONPath=".spec.volume",description="Name of the volume to be restored" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="Pod Volume Restore status such as New/InProgress" // +kubebuilder:printcolumn:name="TotalBytes",type="integer",format="int64",JSONPath=".status.progress.totalBytes",description="Pod Volume Restore status such as New/InProgress" diff --git a/pkg/apis/velero/v1/register.go b/pkg/apis/velero/v1/register.go index ea7df3b5d..13915293a 100644 --- a/pkg/apis/velero/v1/register.go +++ b/pkg/apis/velero/v1/register.go @@ -52,7 +52,7 @@ func CustomResources() map[string]typeInfo { "DeleteBackupRequest": newTypeInfo("deletebackuprequests", &DeleteBackupRequest{}, &DeleteBackupRequestList{}), "PodVolumeBackup": newTypeInfo("podvolumebackups", &PodVolumeBackup{}, &PodVolumeBackupList{}), "PodVolumeRestore": newTypeInfo("podvolumerestores", &PodVolumeRestore{}, &PodVolumeRestoreList{}), - "ResticRepository": newTypeInfo("resticrepositories", &ResticRepository{}, &ResticRepositoryList{}), + 
"BackupRepository": newTypeInfo("backuprepositories", &BackupRepository{}, &BackupRepositoryList{}), "BackupStorageLocation": newTypeInfo("backupstoragelocations", &BackupStorageLocation{}, &BackupStorageLocationList{}), "VolumeSnapshotLocation": newTypeInfo("volumesnapshotlocations", &VolumeSnapshotLocation{}, &VolumeSnapshotLocationList{}), "ServerStatusRequest": newTypeInfo("serverstatusrequests", &ServerStatusRequest{}, &ServerStatusRequestList{}), diff --git a/pkg/apis/velero/v1/restore.go b/pkg/apis/velero/v1/restore.go index 251f1a045..f36e7348c 100644 --- a/pkg/apis/velero/v1/restore.go +++ b/pkg/apis/velero/v1/restore.go @@ -17,8 +17,8 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // RestoreSpec defines the specification for a Velero restore. @@ -208,9 +208,10 @@ type ExecRestoreHook struct { // InitRestoreHook is a hook that adds an init container to a PodSpec to run commands before the // workload pod is able to start. type InitRestoreHook struct { + // +kubebuilder:pruning:PreserveUnknownFields // InitContainers is list of init containers to be added to a pod during its restore. // +optional - InitContainers []v1.Container `json:"initContainers"` + InitContainers []runtime.RawExtension `json:"initContainers"` // Timeout defines the maximum amount of time Velero should wait for the initContainers to complete. // +optional diff --git a/pkg/apis/velero/v1/volume_snapshot_location.go b/pkg/apis/velero/v1/volume_snapshot_location_type.go similarity index 88% rename from pkg/apis/velero/v1/volume_snapshot_location.go rename to pkg/apis/velero/v1/volume_snapshot_location_type.go index a2ba652a6..505e1d994 100644 --- a/pkg/apis/velero/v1/volume_snapshot_location.go +++ b/pkg/apis/velero/v1/volume_snapshot_location_type.go @@ -1,5 +1,5 @@ /* -Copyright 2018 the Velero contributors. +Copyright the Velero contributors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,6 +20,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=vsl +// +kubebuilder:object:generate=true +// +kubebuilder:storageversion // VolumeSnapshotLocation is a location where Velero stores volume snapshots. type VolumeSnapshotLocation struct { @@ -36,6 +40,8 @@ type VolumeSnapshotLocation struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:rbac:groups=velero.io,resources=volumesnapshotlocations,verbs=get;list;watch;create;update;patch;delete // VolumeSnapshotLocationList is a list of VolumeSnapshotLocations. type VolumeSnapshotLocationList struct { diff --git a/pkg/apis/velero/v1/zz_generated.deepcopy.go b/pkg/apis/velero/v1/zz_generated.deepcopy.go index 9cc49a8b5..7cf271e8f 100644 --- a/pkg/apis/velero/v1/zz_generated.deepcopy.go +++ b/pkg/apis/velero/v1/zz_generated.deepcopy.go @@ -107,6 +107,100 @@ func (in *BackupProgress) DeepCopy() *BackupProgress { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupRepository) DeepCopyInto(out *BackupRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepository. +func (in *BackupRepository) DeepCopy() *BackupRepository { + if in == nil { + return nil + } + out := new(BackupRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BackupRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupRepositoryList) DeepCopyInto(out *BackupRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositoryList. +func (in *BackupRepositoryList) DeepCopy() *BackupRepositoryList { + if in == nil { + return nil + } + out := new(BackupRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupRepositorySpec) DeepCopyInto(out *BackupRepositorySpec) { + *out = *in + out.MaintenanceFrequency = in.MaintenanceFrequency +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositorySpec. +func (in *BackupRepositorySpec) DeepCopy() *BackupRepositorySpec { + if in == nil { + return nil + } + out := new(BackupRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupRepositoryStatus) DeepCopyInto(out *BackupRepositoryStatus) { + *out = *in + if in.LastMaintenanceTime != nil { + in, out := &in.LastMaintenanceTime, &out.LastMaintenanceTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupRepositoryStatus. +func (in *BackupRepositoryStatus) DeepCopy() *BackupRepositoryStatus { + if in == nil { + return nil + } + out := new(BackupRepositoryStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BackupResourceHook) DeepCopyInto(out *BackupResourceHook) { *out = *in @@ -671,7 +765,7 @@ func (in *InitRestoreHook) DeepCopyInto(out *InitRestoreHook) { *out = *in if in.InitContainers != nil { in, out := &in.InitContainers, &out.InitContainers - *out = make([]corev1.Container, len(*in)) + *out = make([]runtime.RawExtension, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -966,100 +1060,6 @@ func (in *PodVolumeRestoreStatus) DeepCopy() *PodVolumeRestoreStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResticRepository) DeepCopyInto(out *ResticRepository) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepository. -func (in *ResticRepository) DeepCopy() *ResticRepository { - if in == nil { - return nil - } - out := new(ResticRepository) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ResticRepository) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResticRepositoryList) DeepCopyInto(out *ResticRepositoryList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResticRepository, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepositoryList. -func (in *ResticRepositoryList) DeepCopy() *ResticRepositoryList { - if in == nil { - return nil - } - out := new(ResticRepositoryList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResticRepositoryList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResticRepositorySpec) DeepCopyInto(out *ResticRepositorySpec) { - *out = *in - out.MaintenanceFrequency = in.MaintenanceFrequency -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepositorySpec. -func (in *ResticRepositorySpec) DeepCopy() *ResticRepositorySpec { - if in == nil { - return nil - } - out := new(ResticRepositorySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ResticRepositoryStatus) DeepCopyInto(out *ResticRepositoryStatus) { - *out = *in - if in.LastMaintenanceTime != nil { - in, out := &in.LastMaintenanceTime, &out.LastMaintenanceTime - *out = (*in).DeepCopy() - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepositoryStatus. -func (in *ResticRepositoryStatus) DeepCopy() *ResticRepositoryStatus { - if in == nil { - return nil - } - out := new(ResticRepositoryStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Restore) DeepCopyInto(out *Restore) { *out = *in diff --git a/pkg/backup/backup.go b/pkg/backup/backup.go index 697be8500..d026b09cf 100644 --- a/pkg/backup/backup.go +++ b/pkg/backup/backup.go @@ -46,7 +46,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/plugin/framework" "github.com/vmware-tanzu/velero/pkg/plugin/velero" "github.com/vmware-tanzu/velero/pkg/podexec" - "github.com/vmware-tanzu/velero/pkg/restic" + "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/collections" ) @@ -74,7 +74,7 @@ type kubernetesBackupper struct { dynamicFactory client.DynamicFactory discoveryHelper discovery.Helper podCommandExecutor podexec.PodCommandExecutor - resticBackupperFactory restic.BackupperFactory + resticBackupperFactory podvolume.BackupperFactory resticTimeout time.Duration defaultVolumesToRestic bool clientPageSize int @@ -100,7 +100,7 @@ func NewKubernetesBackupper( discoveryHelper discovery.Helper, dynamicFactory client.DynamicFactory, podCommandExecutor podexec.PodCommandExecutor, - resticBackupperFactory restic.BackupperFactory, + resticBackupperFactory podvolume.BackupperFactory, resticTimeout time.Duration, defaultVolumesToRestic bool, clientPageSize int, @@ -234,7 +234,7 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log 
logrus.FieldLogger, ctx, cancelFunc := context.WithTimeout(context.Background(), podVolumeTimeout) defer cancelFunc() - var resticBackupper restic.Backupper + var resticBackupper podvolume.Backupper if kb.resticBackupperFactory != nil { resticBackupper, err = kb.resticBackupperFactory.NewBackupper(ctx, backupRequest.Backup) if err != nil { diff --git a/pkg/backup/backup_test.go b/pkg/backup/backup_test.go index 53554e8b3..cf6a4269f 100644 --- a/pkg/backup/backup_test.go +++ b/pkg/backup/backup_test.go @@ -47,7 +47,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/discovery" "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" - "github.com/vmware-tanzu/velero/pkg/restic" + "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/test" testutil "github.com/vmware-tanzu/velero/pkg/test" kubeutil "github.com/vmware-tanzu/velero/pkg/util/kube" @@ -2595,7 +2595,7 @@ func TestBackupWithHooks(t *testing.T) { type fakeResticBackupperFactory struct{} -func (f *fakeResticBackupperFactory) NewBackupper(context.Context, *velerov1.Backup) (restic.Backupper, error) { +func (f *fakeResticBackupperFactory) NewBackupper(context.Context, *velerov1.Backup) (podvolume.Backupper, error) { return &fakeResticBackupper{}, nil } diff --git a/pkg/backup/item_backupper.go b/pkg/backup/item_backupper.go index fd4b16028..df5d48cd2 100644 --- a/pkg/backup/item_backupper.go +++ b/pkg/backup/item_backupper.go @@ -42,7 +42,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/features" "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" - "github.com/vmware-tanzu/velero/pkg/restic" + "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/volume" ) @@ -53,7 +53,7 @@ type itemBackupper struct { tarWriter tarWriter dynamicFactory client.DynamicFactory discoveryHelper discovery.Helper - resticBackupper 
restic.Backupper + resticBackupper podvolume.Backupper resticSnapshotTracker *pvcSnapshotTracker volumeSnapshotterGetter VolumeSnapshotterGetter @@ -61,6 +61,11 @@ type itemBackupper struct { snapshotLocationVolumeSnapshotters map[string]velero.VolumeSnapshotter } +const ( + // veleroExcludeFromBackupLabel labeled items should be excluded by velero in backup jobs. + veleroExcludeFromBackupLabel = "velero.io/exclude-from-backup" +) + // backupItem backs up an individual item to tarWriter. The item may be excluded based on the // namespaces IncludesExcludes list. // In addition to the error return, backupItem also returns a bool indicating whether the item @@ -78,8 +83,8 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr log = log.WithField("resource", groupResource.String()) log = log.WithField("namespace", namespace) - if metadata.GetLabels()["velero.io/exclude-from-backup"] == "true" { - log.Info("Excluding item because it has label velero.io/exclude-from-backup=true") + if metadata.GetLabels()[veleroExcludeFromBackupLabel] == "true" { + log.Infof("Excluding item because it has label %s=true", veleroExcludeFromBackupLabel) return false, nil } @@ -144,7 +149,7 @@ func (ib *itemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstr // Get the list of volumes to back up using restic from the pod's annotations. Remove from this list // any volumes that use a PVC that we've already backed up (this would be in a read-write-many scenario, // where it's been backed up from another pod), since we don't need >1 backup per PVC.
- for _, volume := range restic.GetPodVolumesUsingRestic(pod, boolptr.IsSetToTrue(ib.backupRequest.Spec.DefaultVolumesToRestic)) { + for _, volume := range podvolume.GetPodVolumesUsingRestic(pod, boolptr.IsSetToTrue(ib.backupRequest.Spec.DefaultVolumesToRestic)) { if found, pvcName := ib.resticSnapshotTracker.HasPVCForPodVolume(pod, volume); found { log.WithFields(map[string]interface{}{ "podVolume": volume, diff --git a/pkg/builder/container_builder.go b/pkg/builder/container_builder.go index 80c99955d..da6215637 100644 --- a/pkg/builder/container_builder.go +++ b/pkg/builder/container_builder.go @@ -17,9 +17,11 @@ limitations under the License. package builder import ( + "encoding/json" "strings" corev1api "k8s.io/api/core/v1" + apimachineryRuntime "k8s.io/apimachinery/pkg/runtime" ) // ContainerBuilder builds Container objects @@ -89,6 +91,17 @@ func (b *ContainerBuilder) Result() *corev1api.Container { return b.object } +// ResultRawExtension returns the Container as runtime.RawExtension. +func (b *ContainerBuilder) ResultRawExtension() apimachineryRuntime.RawExtension { + result, err := json.Marshal(b.object) + if err != nil { + return apimachineryRuntime.RawExtension{} + } + return apimachineryRuntime.RawExtension{ + Raw: result, + } +} + // Args sets the container's Args. func (b *ContainerBuilder) Args(args ...string) *ContainerBuilder { b.object.Args = append(b.object.Args, args...) 
diff --git a/pkg/cmd/cli/backuplocation/delete.go b/pkg/cmd/cli/backuplocation/delete.go index daedf77cd..1222220f5 100644 --- a/pkg/cmd/cli/backuplocation/delete.go +++ b/pkg/cmd/cli/backuplocation/delete.go @@ -151,8 +151,8 @@ func findAssociatedBackups(client kbclient.Client, bslName, ns string) (velerov1 return backups, err } -func findAssociatedResticRepos(client kbclient.Client, bslName, ns string) (velerov1api.ResticRepositoryList, error) { - var repos velerov1api.ResticRepositoryList +func findAssociatedResticRepos(client kbclient.Client, bslName, ns string) (velerov1api.BackupRepositoryList, error) { + var repos velerov1api.BackupRepositoryList err := client.List(context.Background(), &repos, &kbclient.ListOptions{ Namespace: ns, Raw: &metav1.ListOptions{LabelSelector: bslLabelKey + "=" + bslName}, @@ -172,7 +172,7 @@ func deleteBackups(client kbclient.Client, backups velerov1api.BackupList) []err return errs } -func deleteResticRepos(client kbclient.Client, repos velerov1api.ResticRepositoryList) []error { +func deleteResticRepos(client kbclient.Client, repos velerov1api.BackupRepositoryList) []error { var errs []error for _, repo := range repos.Items { if err := client.Delete(context.Background(), &repo, &kbclient.DeleteOptions{}); err != nil { diff --git a/pkg/cmd/cli/restic/repo/get.go b/pkg/cmd/cli/restic/repo/get.go index 8fd848fd4..24692f355 100644 --- a/pkg/cmd/cli/restic/repo/get.go +++ b/pkg/cmd/cli/restic/repo/get.go @@ -41,16 +41,16 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command { veleroClient, err := f.Client() cmd.CheckError(err) - var repos *api.ResticRepositoryList + var repos *api.BackupRepositoryList if len(args) > 0 { - repos = new(api.ResticRepositoryList) + repos = new(api.BackupRepositoryList) for _, name := range args { - repo, err := veleroClient.VeleroV1().ResticRepositories(f.Namespace()).Get(context.TODO(), name, metav1.GetOptions{}) + repo, err := 
veleroClient.VeleroV1().BackupRepositories(f.Namespace()).Get(context.TODO(), name, metav1.GetOptions{}) cmd.CheckError(err) repos.Items = append(repos.Items, *repo) } } else { - repos, err = veleroClient.VeleroV1().ResticRepositories(f.Namespace()).List(context.TODO(), listOptions) + repos, err = veleroClient.VeleroV1().BackupRepositories(f.Namespace()).List(context.TODO(), listOptions) cmd.CheckError(err) } diff --git a/pkg/cmd/cli/snapshotlocation/create.go b/pkg/cmd/cli/snapshotlocation/create.go index 824156eac..2de6b2827 100644 --- a/pkg/cmd/cli/snapshotlocation/create.go +++ b/pkg/cmd/cli/snapshotlocation/create.go @@ -63,6 +63,7 @@ type CreateOptions struct { func NewCreateOptions() *CreateOptions { return &CreateOptions{ Config: flag.NewMap(), + Labels: flag.NewMap(), } } diff --git a/pkg/cmd/server/plugin/plugin.go b/pkg/cmd/server/plugin/plugin.go index 5c833d76a..5dec03856 100644 --- a/pkg/cmd/server/plugin/plugin.go +++ b/pkg/cmd/server/plugin/plugin.go @@ -19,9 +19,11 @@ package plugin import ( "github.com/sirupsen/logrus" "github.com/spf13/cobra" - apiextensions "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/features" + "github.com/vmware-tanzu/velero/pkg/backup" "github.com/vmware-tanzu/velero/pkg/client" velerodiscovery "github.com/vmware-tanzu/velero/pkg/discovery" @@ -36,11 +38,10 @@ func NewCommand(f client.Factory) *cobra.Command { Hidden: true, Short: "INTERNAL COMMAND ONLY - not intended to be run directly by users", Run: func(c *cobra.Command, args []string) { - pluginServer. + pluginServer = pluginServer. RegisterBackupItemAction("velero.io/pv", newPVBackupItemAction). RegisterBackupItemAction("velero.io/pod", newPodBackupItemAction). RegisterBackupItemAction("velero.io/service-account", newServiceAccountBackupItemAction(f)). - RegisterBackupItemAction("velero.io/crd-remap-version", newRemapCRDVersionAction(f)). 
RegisterRestoreItemAction("velero.io/job", newJobRestoreItemAction). RegisterRestoreItemAction("velero.io/pod", newPodRestoreItemAction). RegisterRestoreItemAction("velero.io/restic", newResticRestoreItemAction(f)). @@ -55,13 +56,15 @@ func NewCommand(f client.Factory) *cobra.Command { RegisterRestoreItemAction("velero.io/crd-preserve-fields", newCRDV1PreserveUnknownFieldsItemAction). RegisterRestoreItemAction("velero.io/change-pvc-node-selector", newChangePVCNodeSelectorItemAction(f)). RegisterRestoreItemAction("velero.io/apiservice", newAPIServiceRestoreItemAction). - RegisterRestoreItemAction("velero.io/admission-webhook-configuration", newAdmissionWebhookConfigurationAction). - Serve() + RegisterRestoreItemAction("velero.io/admission-webhook-configuration", newAdmissionWebhookConfigurationAction) + if !features.IsEnabled(velerov1api.APIGroupVersionsFeatureFlag) { + // Do not register crd-remap-version BIA if the API Group feature flag is enabled, so that the v1 CRD can be backed up + pluginServer = pluginServer.RegisterBackupItemAction("velero.io/crd-remap-version", newRemapCRDVersionAction(f)) + } + pluginServer.Serve() }, } - pluginServer.BindFlags(c.Flags()) - return c } diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index e837020d3..f45a30220 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -80,6 +80,7 @@ import ( "github.com/vmware-tanzu/velero/internal/storage" "github.com/vmware-tanzu/velero/internal/util/managercontroller" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + repokey "github.com/vmware-tanzu/velero/pkg/repository/keys" ) const ( @@ -522,7 +523,7 @@ func (s *server) initRestic() error { } // ensure the repo key secret is set up - if err := restic.EnsureCommonRepositoryKey(s.kubeClient.CoreV1(), s.namespace); err != nil { + if err := repokey.EnsureCommonRepositoryKey(s.kubeClient.CoreV1(), s.namespace); err != nil { return err } @@ -530,7 +531,7 @@ func (s *server) initRestic() error { 
s.ctx, s.namespace, s.veleroClient, - s.sharedInformerFactory.Velero().V1().ResticRepositories(), + s.sharedInformerFactory.Velero().V1().BackupRepositories(), s.veleroClient.VeleroV1(), s.mgr.GetClient(), s.kubeClient.CoreV1(), diff --git a/pkg/cmd/util/output/output.go b/pkg/cmd/util/output/output.go index 24188ed0d..ab3f7a95d 100644 --- a/pkg/cmd/util/output/output.go +++ b/pkg/cmd/util/output/output.go @@ -177,15 +177,15 @@ func printTable(cmd *cobra.Command, obj runtime.Object) (bool, error) { ColumnDefinitions: scheduleColumns, Rows: printScheduleList(obj.(*velerov1api.ScheduleList)), } - case *velerov1api.ResticRepository: + case *velerov1api.BackupRepository: table = &metav1.Table{ ColumnDefinitions: resticRepoColumns, - Rows: printResticRepo(obj.(*velerov1api.ResticRepository)), + Rows: printResticRepo(obj.(*velerov1api.BackupRepository)), } - case *velerov1api.ResticRepositoryList: + case *velerov1api.BackupRepositoryList: table = &metav1.Table{ ColumnDefinitions: resticRepoColumns, - Rows: printResticRepoList(obj.(*velerov1api.ResticRepositoryList)), + Rows: printResticRepoList(obj.(*velerov1api.BackupRepositoryList)), } case *velerov1api.BackupStorageLocation: table = &metav1.Table{ diff --git a/pkg/cmd/util/output/restic_repo_printer.go b/pkg/cmd/util/output/restic_repo_printer.go index 803a3486f..fd6766087 100644 --- a/pkg/cmd/util/output/restic_repo_printer.go +++ b/pkg/cmd/util/output/restic_repo_printer.go @@ -33,7 +33,7 @@ var ( } ) -func printResticRepoList(list *v1.ResticRepositoryList) []metav1.TableRow { +func printResticRepoList(list *v1.BackupRepositoryList) []metav1.TableRow { rows := make([]metav1.TableRow, 0, len(list.Items)) for i := range list.Items { @@ -42,14 +42,14 @@ func printResticRepoList(list *v1.ResticRepositoryList) []metav1.TableRow { return rows } -func printResticRepo(repo *v1.ResticRepository) []metav1.TableRow { +func printResticRepo(repo *v1.BackupRepository) []metav1.TableRow { row := metav1.TableRow{ Object: 
runtime.RawExtension{Object: repo}, } status := repo.Status.Phase if status == "" { - status = v1.ResticRepositoryPhaseNew + status = v1.BackupRepositoryPhaseNew } var lastMaintenance string diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go index eff0861eb..59080abf3 100644 --- a/pkg/controller/backup_controller.go +++ b/pkg/controller/backup_controller.go @@ -70,6 +70,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/util/logging" "github.com/vmware-tanzu/velero/pkg/volume" + corev1api "k8s.io/api/core/v1" kbclient "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -260,7 +261,6 @@ func (c *backupController) processBackup(key string) error { log.Debug("Preparing backup request") request := c.prepareBackupRequest(original) - if len(request.Status.ValidationErrors) > 0 { request.Status.Phase = velerov1api.BackupPhaseFailedValidation } else { @@ -444,6 +444,17 @@ func (c *backupController) prepareBackupRequest(backup *velerov1api.Backup) *pkg request.Annotations[velerov1api.SourceClusterK8sMajorVersionAnnotation] = c.discoveryHelper.ServerVersion().Major request.Annotations[velerov1api.SourceClusterK8sMinorVersionAnnotation] = c.discoveryHelper.ServerVersion().Minor + // Add namespaces with label velero.io/exclude-from-backup=true into request.Spec.ExcludedNamespaces + // Essentially, adding the label velero.io/exclude-from-backup=true to a namespace would be equivalent to setting spec.ExcludedNamespaces + namespaces := corev1api.NamespaceList{} + if err := c.kbClient.List(context.Background(), &namespaces, kbclient.MatchingLabels{"velero.io/exclude-from-backup": "true"}); err == nil { + for _, ns := range namespaces.Items { + request.Spec.ExcludedNamespaces = append(request.Spec.ExcludedNamespaces, ns.Name) + } + } else { + request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("error getting namespace list: %v", err)) + } + // validate the included/excluded resources for _, err := range 
collections.ValidateIncludesExcludes(request.Spec.IncludedResources, request.Spec.ExcludedResources) { request.Status.ValidationErrors = append(request.Status.ValidationErrors, fmt.Sprintf("Invalid included/excluded resource lists: %v", err)) diff --git a/pkg/controller/backup_deletion_controller.go b/pkg/controller/backup_deletion_controller.go index 9cbfe1e8d..52d358042 100644 --- a/pkg/controller/backup_deletion_controller.go +++ b/pkg/controller/backup_deletion_controller.go @@ -198,7 +198,7 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, err } - // if the request object has no labels defined, initialise an empty map since + // if the request object has no labels defined, initialize an empty map since // we will be updating labels if dbr.Labels == nil { dbr.Labels = map[string]string{} diff --git a/pkg/controller/backup_storage_location_controller.go b/pkg/controller/backup_storage_location_controller.go index ec35a8916..1b08da897 100644 --- a/pkg/controller/backup_storage_location_controller.go +++ b/pkg/controller/backup_storage_location_controller.go @@ -24,12 +24,10 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/predicate" "github.com/vmware-tanzu/velero/internal/storage" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" @@ -39,7 +37,10 @@ import ( ) const ( - backupStorageLocationSyncPeriod = 1 * time.Minute + // keep the enqueue period a smaller value to make sure the BSL can be validated as expected. 
+ // The BSL validation frequency is 1 minute by default, if we set the enqueue period as 1 minute, + // this will cause the actual validation interval for each BSL to be 2 minutes + bslValidationEnqueuePeriod = 10 * time.Second ) // BackupStorageLocationReconciler reconciles a BackupStorageLocation object @@ -185,7 +186,7 @@ func (r *BackupStorageLocationReconciler) SetupWithManager(mgr ctrl.Manager) err r.Log, mgr.GetClient(), &velerov1api.BackupStorageLocationList{}, - backupStorageLocationSyncPeriod, + bslValidationEnqueuePeriod, // Add filter function to enqueue BSL per ValidationFrequency setting. func(object client.Object) bool { location := object.(*velerov1api.BackupStorageLocation) @@ -193,22 +194,8 @@ func (r *BackupStorageLocationReconciler) SetupWithManager(mgr ctrl.Manager) err }, ) return ctrl.NewControllerManagedBy(mgr). - For(&velerov1api.BackupStorageLocation{}). - // Handle BSL's creation event and spec update event to let changed BSL got validation immediately. - WithEventFilter(predicate.Funcs{ - CreateFunc: func(ce event.CreateEvent) bool { - return true - }, - UpdateFunc: func(ue event.UpdateEvent) bool { - return ue.ObjectNew.GetGeneration() != ue.ObjectOld.GetGeneration() - }, - DeleteFunc: func(de event.DeleteEvent) bool { - return false - }, - GenericFunc: func(ge event.GenericEvent) bool { - return false - }, - }). + // As the "status.LastValidationTime" field is always updated, this triggers new reconciling process, skip the update event that include no spec change to avoid the reconcile loop + For(&velerov1api.BackupStorageLocation{}, builder.WithPredicates(kube.SpecChangePredicate{})). Watches(g, nil). 
Complete(r) } diff --git a/pkg/controller/pod_volume_backup_controller.go b/pkg/controller/pod_volume_backup_controller.go index 322592f80..abec6d601 100644 --- a/pkg/controller/pod_volume_backup_controller.go +++ b/pkg/controller/pod_volume_backup_controller.go @@ -36,6 +36,7 @@ import ( "github.com/vmware-tanzu/velero/internal/credentials" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/metrics" + repokey "github.com/vmware-tanzu/velero/pkg/repository/keys" "github.com/vmware-tanzu/velero/pkg/restic" "github.com/vmware-tanzu/velero/pkg/util/filesystem" "github.com/vmware-tanzu/velero/pkg/util/kube" @@ -124,7 +125,11 @@ func (r *PodVolumeBackupReconciler) Reconcile(ctx context.Context, req ctrl.Requ if err != nil { return r.updateStatusToFailed(ctx, &pvb, err, "building Restic command", log) } - defer os.Remove(resticDetails.credsFile) + + defer func() { + os.Remove(resticDetails.credsFile) + os.Remove(resticDetails.caCertFile) + }() backupLocation := &velerov1api.BackupStorageLocation{} if err := r.Client.Get(context.Background(), client.ObjectKey{ @@ -204,19 +209,6 @@ func (r *PodVolumeBackupReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *PodVolumeBackupReconciler) singlePathMatch(path string) (string, error) { - matches, err := r.FileSystem.Glob(path) - if err != nil { - return "", errors.WithStack(err) - } - - if len(matches) != 1 { - return "", errors.Errorf("expected one matching path: %s, got %d", path, len(matches)) - } - - return matches[0], nil -} - // getParentSnapshot finds the most recent completed PodVolumeBackup for the // specified PVC and returns its Restic snapshot ID. Any errors encountered are // logged but not returned since they do not prevent a backup from proceeding. 
@@ -237,7 +229,7 @@ func (r *PodVolumeBackupReconciler) getParentSnapshot(ctx context.Context, log l // Go through all the podvolumebackups for the PVC and look for the most // recent completed one to use as the parent. - var mostRecentPVB *velerov1api.PodVolumeBackup + var mostRecentPVB velerov1api.PodVolumeBackup for _, pvb := range pvbList.Items { if pvb.Status.Phase != velerov1api.PodVolumeBackupPhaseCompleted { continue @@ -254,12 +246,12 @@ func (r *PodVolumeBackupReconciler) getParentSnapshot(ctx context.Context, log l continue } - if mostRecentPVB == nil || pvb.Status.StartTimestamp.After(mostRecentPVB.Status.StartTimestamp.Time) { - mostRecentPVB = &pvb + if mostRecentPVB.Status == (velerov1api.PodVolumeBackupStatus{}) || pvb.Status.StartTimestamp.After(mostRecentPVB.Status.StartTimestamp.Time) { + mostRecentPVB = pvb } } - if mostRecentPVB == nil { + if mostRecentPVB.Status == (velerov1api.PodVolumeBackupStatus{}) { log.Info("No completed PodVolumeBackup found for PVC") return "" } @@ -313,14 +305,14 @@ func (r *PodVolumeBackupReconciler) buildResticCommand(ctx context.Context, log pathGlob := fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(pvb.Spec.Pod.UID), volDir) log.WithField("pathGlob", pathGlob).Debug("Looking for path matching glob") - path, err := r.singlePathMatch(pathGlob) + path, err := kube.SinglePathMatch(pathGlob, r.FileSystem, log) if err != nil { return nil, errors.Wrap(err, "identifying unique volume path on host") } log.WithField("path", path).Debugf("Found path matching glob") // Temporary credentials. 
- details.credsFile, err = r.CredsFileStore.Path(restic.RepoKeySelector()) + details.credsFile, err = r.CredsFileStore.Path(repokey.RepoKeySelector()) if err != nil { return nil, errors.Wrap(err, "creating temporary Restic credentials file") } @@ -344,8 +336,6 @@ func (r *PodVolumeBackupReconciler) buildResticCommand(ctx context.Context, log if err != nil { log.WithError(err).Error("creating temporary caCert file") } - defer os.Remove(details.caCertFile) - } cmd.CACertFile = details.caCertFile diff --git a/pkg/controller/pod_volume_restore_controller.go b/pkg/controller/pod_volume_restore_controller.go index 3315dae3b..2b81d363a 100644 --- a/pkg/controller/pod_volume_restore_controller.go +++ b/pkg/controller/pod_volume_restore_controller.go @@ -39,6 +39,7 @@ import ( "github.com/vmware-tanzu/velero/internal/credentials" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + repokey "github.com/vmware-tanzu/velero/pkg/repository/keys" "github.com/vmware-tanzu/velero/pkg/restic" "github.com/vmware-tanzu/velero/pkg/util/boolptr" "github.com/vmware-tanzu/velero/pkg/util/filesystem" @@ -215,19 +216,6 @@ func getResticInitContainerIndex(pod *corev1api.Pod) int { return -1 } -func singlePathMatch(path string) (string, error) { - matches, err := filepath.Glob(path) - if err != nil { - return "", errors.WithStack(err) - } - - if len(matches) != 1 { - return "", errors.Errorf("expected one matching path: %s, got %d", path, len(matches)) - } - - return matches[0], nil -} - func (c *PodVolumeRestoreReconciler) processRestore(ctx context.Context, req *velerov1api.PodVolumeRestore, pod *corev1api.Pod, log logrus.FieldLogger) error { volumeDir, err := kube.GetVolumeDirectory(ctx, log, pod, req.Spec.Volume, c.Client) if err != nil { @@ -236,12 +224,14 @@ func (c *PodVolumeRestoreReconciler) processRestore(ctx context.Context, req *ve // Get the full path of the new volume's directory as mounted in the daemonset pod, which // will look like: /host_pods//volumes// - 
volumePath, err := singlePathMatch(fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(req.Spec.Pod.UID), volumeDir)) + volumePath, err := kube.SinglePathMatch( + fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(req.Spec.Pod.UID), volumeDir), + c.fileSystem, log) if err != nil { return errors.Wrap(err, "error identifying path of volume") } - credsFile, err := c.credentialsFileStore.Path(restic.RepoKeySelector()) + credsFile, err := c.credentialsFileStore.Path(repokey.RepoKeySelector()) if err != nil { return errors.Wrap(err, "error creating temp restic credentials file") } diff --git a/pkg/controller/restic_repository_controller.go b/pkg/controller/restic_repository_controller.go index d4d0ef68d..c3ca1505a 100644 --- a/pkg/controller/restic_repository_controller.go +++ b/pkg/controller/restic_repository_controller.go @@ -26,11 +26,11 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/clock" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + repoconfig "github.com/vmware-tanzu/velero/pkg/repository/config" "github.com/vmware-tanzu/velero/pkg/restic" "github.com/vmware-tanzu/velero/pkg/util/kube" ) @@ -68,16 +68,16 @@ func NewResticRepoReconciler(namespace string, logger logrus.FieldLogger, client } func (r *ResticRepoReconciler) SetupWithManager(mgr ctrl.Manager) error { - s := kube.NewPeriodicalEnqueueSource(r.logger, mgr.GetClient(), &velerov1api.ResticRepositoryList{}, repoSyncPeriod) + s := kube.NewPeriodicalEnqueueSource(r.logger, mgr.GetClient(), &velerov1api.BackupRepositoryList{}, repoSyncPeriod) return ctrl.NewControllerManagedBy(mgr). - For(&velerov1api.ResticRepository{}). + For(&velerov1api.BackupRepository{}). Watches(s, nil). 
Complete(r) } func (r *ResticRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := r.logger.WithField("resticRepo", req.String()) - resticRepo := &velerov1api.ResticRepository{} + resticRepo := &velerov1api.BackupRepository{} if err := r.Get(ctx, req.NamespacedName, resticRepo); err != nil { if apierrors.IsNotFound(err) { log.Warnf("restic repository %s in namespace %s is not found", req.Name, req.Namespace) @@ -87,7 +87,7 @@ func (r *ResticRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request) return ctrl.Result{}, err } - if resticRepo.Status.Phase == "" || resticRepo.Status.Phase == velerov1api.ResticRepositoryPhaseNew { + if resticRepo.Status.Phase == "" || resticRepo.Status.Phase == velerov1api.BackupRepositoryPhaseNew { if err := r.initializeRepo(ctx, resticRepo, log); err != nil { log.WithError(err).Error("error initialize repository") return ctrl.Result{}, errors.WithStack(err) @@ -105,16 +105,16 @@ func (r *ResticRepoReconciler) Reconcile(ctx context.Context, req ctrl.Request) } switch resticRepo.Status.Phase { - case velerov1api.ResticRepositoryPhaseReady: + case velerov1api.BackupRepositoryPhaseReady: return ctrl.Result{}, r.runMaintenanceIfDue(ctx, resticRepo, log) - case velerov1api.ResticRepositoryPhaseNotReady: + case velerov1api.BackupRepositoryPhaseNotReady: return ctrl.Result{}, r.checkNotReadyRepo(ctx, resticRepo, log) } return ctrl.Result{}, nil } -func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1api.ResticRepository, log logrus.FieldLogger) error { +func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error { log.Info("Initializing restic repository") // confirm the repo's BackupStorageLocation is valid @@ -127,11 +127,11 @@ func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1 return r.patchResticRepository(ctx, req, repoNotReady(err.Error())) } - repoIdentifier, 
err := restic.GetRepoIdentifier(loc, req.Spec.VolumeNamespace) + repoIdentifier, err := repoconfig.GetRepoIdentifier(loc, req.Spec.VolumeNamespace) if err != nil { - return r.patchResticRepository(ctx, req, func(rr *velerov1api.ResticRepository) { + return r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) { rr.Status.Message = err.Error() - rr.Status.Phase = velerov1api.ResticRepositoryPhaseNotReady + rr.Status.Phase = velerov1api.BackupRepositoryPhaseNotReady if rr.Spec.MaintenanceFrequency.Duration <= 0 { rr.Spec.MaintenanceFrequency = metav1.Duration{Duration: r.defaultMaintenanceFrequency} @@ -140,7 +140,7 @@ func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1 } // defaulting - if the patch fails, return an error so the item is returned to the queue - if err := r.patchResticRepository(ctx, req, func(rr *velerov1api.ResticRepository) { + if err := r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) { rr.Spec.ResticIdentifier = repoIdentifier if rr.Spec.MaintenanceFrequency.Duration <= 0 { @@ -154,8 +154,8 @@ func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1 return r.patchResticRepository(ctx, req, repoNotReady(err.Error())) } - return r.patchResticRepository(ctx, req, func(rr *velerov1api.ResticRepository) { - rr.Status.Phase = velerov1api.ResticRepositoryPhaseReady + return r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) { + rr.Status.Phase = velerov1api.BackupRepositoryPhaseReady rr.Status.LastMaintenanceTime = &metav1.Time{Time: time.Now()} }) } @@ -163,7 +163,7 @@ func (r *ResticRepoReconciler) initializeRepo(ctx context.Context, req *velerov1 // ensureRepo checks to see if a repository exists, and attempts to initialize it if // it does not exist. An error is returned if the repository can't be connected to // or initialized. 
-func ensureRepo(repo *velerov1api.ResticRepository, repoManager restic.RepositoryManager) error { +func ensureRepo(repo *velerov1api.BackupRepository, repoManager restic.RepositoryManager) error { if err := repoManager.ConnectToRepo(repo); err != nil { // If the repository has not yet been initialized, the error message will always include // the following string. This is the only scenario where we should try to initialize it. @@ -179,7 +179,7 @@ func ensureRepo(repo *velerov1api.ResticRepository, repoManager restic.Repositor return nil } -func (r *ResticRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *velerov1api.ResticRepository, log logrus.FieldLogger) error { +func (r *ResticRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error { log.Debug("resticRepositoryController.runMaintenanceIfDue") now := r.clock.Now() @@ -196,21 +196,21 @@ func (r *ResticRepoReconciler) runMaintenanceIfDue(ctx context.Context, req *vel log.Debug("Pruning repo") if err := r.repositoryManager.PruneRepo(req); err != nil { log.WithError(err).Warn("error pruning repository") - return r.patchResticRepository(ctx, req, func(rr *velerov1api.ResticRepository) { + return r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) { rr.Status.Message = err.Error() }) } - return r.patchResticRepository(ctx, req, func(rr *velerov1api.ResticRepository) { + return r.patchResticRepository(ctx, req, func(rr *velerov1api.BackupRepository) { rr.Status.LastMaintenanceTime = &metav1.Time{Time: now} }) } -func dueForMaintenance(req *velerov1api.ResticRepository, now time.Time) bool { +func dueForMaintenance(req *velerov1api.BackupRepository, now time.Time) bool { return req.Status.LastMaintenanceTime == nil || req.Status.LastMaintenanceTime.Add(req.Spec.MaintenanceFrequency.Duration).Before(now) } -func (r *ResticRepoReconciler) checkNotReadyRepo(ctx context.Context, req *velerov1api.ResticRepository, log 
logrus.FieldLogger) error { +func (r *ResticRepoReconciler) checkNotReadyRepo(ctx context.Context, req *velerov1api.BackupRepository, log logrus.FieldLogger) error { // no identifier: can't possibly be ready, so just return if req.Spec.ResticIdentifier == "" { return nil @@ -226,16 +226,16 @@ func (r *ResticRepoReconciler) checkNotReadyRepo(ctx context.Context, req *veler return r.patchResticRepository(ctx, req, repoReady()) } -func repoNotReady(msg string) func(*velerov1api.ResticRepository) { - return func(r *velerov1api.ResticRepository) { - r.Status.Phase = velerov1api.ResticRepositoryPhaseNotReady +func repoNotReady(msg string) func(*velerov1api.BackupRepository) { + return func(r *velerov1api.BackupRepository) { + r.Status.Phase = velerov1api.BackupRepositoryPhaseNotReady r.Status.Message = msg } } -func repoReady() func(*velerov1api.ResticRepository) { - return func(r *velerov1api.ResticRepository) { - r.Status.Phase = velerov1api.ResticRepositoryPhaseReady +func repoReady() func(*velerov1api.BackupRepository) { + return func(r *velerov1api.BackupRepository) { + r.Status.Phase = velerov1api.BackupRepositoryPhaseReady r.Status.Message = "" } } @@ -243,7 +243,7 @@ func repoReady() func(*velerov1api.ResticRepository) { // patchResticRepository mutates req with the provided mutate function, and patches it // through the Kube API. After executing this function, req will be updated with both // the mutation and the results of the Patch() API call. 
-func (r *ResticRepoReconciler) patchResticRepository(ctx context.Context, req *velerov1api.ResticRepository, mutate func(*velerov1api.ResticRepository)) error { +func (r *ResticRepoReconciler) patchResticRepository(ctx context.Context, req *velerov1api.BackupRepository, mutate func(*velerov1api.BackupRepository)) error { original := req.DeepCopy() mutate(req) if err := r.Patch(ctx, req, client.MergeFrom(original)); err != nil { diff --git a/pkg/controller/restic_repository_controller_test.go b/pkg/controller/restic_repository_controller_test.go index 2e6b4308a..28e899329 100644 --- a/pkg/controller/restic_repository_controller_test.go +++ b/pkg/controller/restic_repository_controller_test.go @@ -30,7 +30,7 @@ import ( const defaultMaintenanceFrequency = 10 * time.Minute -func mockResticRepoReconciler(t *testing.T, rr *velerov1api.ResticRepository, mockOn string, arg interface{}, ret interface{}) *ResticRepoReconciler { +func mockResticRepoReconciler(t *testing.T, rr *velerov1api.BackupRepository, mockOn string, arg interface{}, ret interface{}) *ResticRepoReconciler { mgr := &resticmokes.RepositoryManager{} if mockOn != "" { mgr.On(mockOn, arg).Return(ret) @@ -44,13 +44,13 @@ func mockResticRepoReconciler(t *testing.T, rr *velerov1api.ResticRepository, mo ) } -func mockResticRepositoryCR() *velerov1api.ResticRepository { - return &velerov1api.ResticRepository{ +func mockResticRepositoryCR() *velerov1api.BackupRepository { + return &velerov1api.BackupRepository{ ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1api.DefaultNamespace, Name: "repo", }, - Spec: velerov1api.ResticRepositorySpec{ + Spec: velerov1api.BackupRepositorySpec{ MaintenanceFrequency: metav1.Duration{defaultMaintenanceFrequency}, }, } @@ -64,10 +64,10 @@ func TestPatchResticRepository(t *testing.T) { assert.NoError(t, err) err = reconciler.patchResticRepository(context.Background(), rr, repoReady()) assert.NoError(t, err) - assert.Equal(t, rr.Status.Phase, velerov1api.ResticRepositoryPhaseReady) 
+ assert.Equal(t, rr.Status.Phase, velerov1api.BackupRepositoryPhaseReady) err = reconciler.patchResticRepository(context.Background(), rr, repoNotReady("not ready")) assert.NoError(t, err) - assert.NotEqual(t, rr.Status.Phase, velerov1api.ResticRepositoryPhaseReady) + assert.NotEqual(t, rr.Status.Phase, velerov1api.BackupRepositoryPhaseReady) } func TestCheckNotReadyRepo(t *testing.T) { @@ -77,11 +77,11 @@ func TestCheckNotReadyRepo(t *testing.T) { assert.NoError(t, err) err = reconciler.checkNotReadyRepo(context.TODO(), rr, reconciler.logger) assert.NoError(t, err) - assert.Equal(t, rr.Status.Phase, velerov1api.ResticRepositoryPhase("")) + assert.Equal(t, rr.Status.Phase, velerov1api.BackupRepositoryPhase("")) rr.Spec.ResticIdentifier = "s3:test.amazonaws.com/bucket/restic" err = reconciler.checkNotReadyRepo(context.TODO(), rr, reconciler.logger) assert.NoError(t, err) - assert.Equal(t, rr.Status.Phase, velerov1api.ResticRepositoryPhaseReady) + assert.Equal(t, rr.Status.Phase, velerov1api.BackupRepositoryPhaseReady) } func TestRunMaintenanceIfDue(t *testing.T) { @@ -121,23 +121,23 @@ func TestInitializeRepo(t *testing.T) { assert.NoError(t, err) err = reconciler.initializeRepo(context.TODO(), rr, reconciler.logger) assert.NoError(t, err) - assert.Equal(t, rr.Status.Phase, velerov1api.ResticRepositoryPhaseReady) + assert.Equal(t, rr.Status.Phase, velerov1api.BackupRepositoryPhaseReady) } func TestResticRepoReconcile(t *testing.T) { tests := []struct { name string - repo *velerov1api.ResticRepository + repo *velerov1api.BackupRepository expectNil bool }{ { name: "test on api server not found", - repo: &velerov1api.ResticRepository{ + repo: &velerov1api.BackupRepository{ ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1api.DefaultNamespace, Name: "unknown", }, - Spec: velerov1api.ResticRepositorySpec{ + Spec: velerov1api.BackupRepositorySpec{ MaintenanceFrequency: metav1.Duration{defaultMaintenanceFrequency}, }, }, @@ -145,12 +145,12 @@ func 
TestResticRepoReconcile(t *testing.T) { }, { name: "test on initialize repo", - repo: &velerov1api.ResticRepository{ + repo: &velerov1api.BackupRepository{ ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1api.DefaultNamespace, Name: "repo", }, - Spec: velerov1api.ResticRepositorySpec{ + Spec: velerov1api.BackupRepositorySpec{ MaintenanceFrequency: metav1.Duration{defaultMaintenanceFrequency}, }, }, @@ -158,16 +158,16 @@ func TestResticRepoReconcile(t *testing.T) { }, { name: "test on repo with new phase", - repo: &velerov1api.ResticRepository{ + repo: &velerov1api.BackupRepository{ ObjectMeta: metav1.ObjectMeta{ Namespace: velerov1api.DefaultNamespace, Name: "repo", }, - Spec: velerov1api.ResticRepositorySpec{ + Spec: velerov1api.BackupRepositorySpec{ MaintenanceFrequency: metav1.Duration{defaultMaintenanceFrequency}, }, - Status: velerov1api.ResticRepositoryStatus{ - Phase: velerov1api.ResticRepositoryPhaseNew, + Status: velerov1api.BackupRepositoryStatus{ + Phase: velerov1api.BackupRepositoryPhaseNew, }, }, expectNil: true, diff --git a/pkg/controller/restore_controller.go b/pkg/controller/restore_controller.go index 092b90002..559611fe0 100644 --- a/pkg/controller/restore_controller.go +++ b/pkg/controller/restore_controller.go @@ -38,6 +38,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/cache" + hook "github.com/vmware-tanzu/velero/internal/hook" api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" velerov1client "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1" @@ -71,10 +72,14 @@ var nonRestorableResources = []string{ // https://github.com/vmware-tanzu/velero/issues/622 "restores.velero.io", + // TODO: Remove this in v1.11 or v1.12 // Restic repositories are automatically managed by Velero and will be automatically // created as needed if they don't exist. 
// https://github.com/vmware-tanzu/velero/issues/1113 "resticrepositories.velero.io", + + // Backup repositories were renamed from Restic repositories + "backuprepositories.velero.io", } type restoreController struct { @@ -324,6 +329,22 @@ func (c *restoreController) validateAndComplete(restore *api.Restore, pluginMana return backupInfo{} } + // validate Restore Init Hook's InitContainers + restoreHooks, err := hook.GetRestoreHooksFromSpec(&restore.Spec.Hooks) + if err != nil { + restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, err.Error()) + } + for _, resource := range restoreHooks { + for _, h := range resource.RestoreHooks { + for _, container := range h.Init.InitContainers { + err = hook.ValidateContainer(container.Raw) + if err != nil { + restore.Status.ValidationErrors = append(restore.Status.ValidationErrors, err.Error()) + } + } + } + } + // if ScheduleName is specified, fill in BackupName with the most recent successful backup from // the schedule if restore.Spec.ScheduleName != "" { diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/resticrepository.go b/pkg/generated/clientset/versioned/typed/velero/v1/backuprepository.go similarity index 53% rename from pkg/generated/clientset/versioned/typed/velero/v1/resticrepository.go rename to pkg/generated/clientset/versioned/typed/velero/v1/backuprepository.go index 44d5c0760..7ecef6dcf 100644 --- a/pkg/generated/clientset/versioned/typed/velero/v1/resticrepository.go +++ b/pkg/generated/clientset/versioned/typed/velero/v1/backuprepository.go @@ -30,46 +30,46 @@ import ( rest "k8s.io/client-go/rest" ) -// ResticRepositoriesGetter has a method to return a ResticRepositoryInterface. +// BackupRepositoriesGetter has a method to return a BackupRepositoryInterface. // A group's client should implement this interface. 
-type ResticRepositoriesGetter interface { - ResticRepositories(namespace string) ResticRepositoryInterface +type BackupRepositoriesGetter interface { + BackupRepositories(namespace string) BackupRepositoryInterface } -// ResticRepositoryInterface has methods to work with ResticRepository resources. -type ResticRepositoryInterface interface { - Create(ctx context.Context, resticRepository *v1.ResticRepository, opts metav1.CreateOptions) (*v1.ResticRepository, error) - Update(ctx context.Context, resticRepository *v1.ResticRepository, opts metav1.UpdateOptions) (*v1.ResticRepository, error) - UpdateStatus(ctx context.Context, resticRepository *v1.ResticRepository, opts metav1.UpdateOptions) (*v1.ResticRepository, error) +// BackupRepositoryInterface has methods to work with BackupRepository resources. +type BackupRepositoryInterface interface { + Create(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.CreateOptions) (*v1.BackupRepository, error) + Update(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.UpdateOptions) (*v1.BackupRepository, error) + UpdateStatus(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.UpdateOptions) (*v1.BackupRepository, error) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error - Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ResticRepository, error) - List(ctx context.Context, opts metav1.ListOptions) (*v1.ResticRepositoryList, error) + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.BackupRepository, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.BackupRepositoryList, error) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result 
*v1.ResticRepository, err error) - ResticRepositoryExpansion + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.BackupRepository, err error) + BackupRepositoryExpansion } -// resticRepositories implements ResticRepositoryInterface -type resticRepositories struct { +// backupRepositories implements BackupRepositoryInterface +type backupRepositories struct { client rest.Interface ns string } -// newResticRepositories returns a ResticRepositories -func newResticRepositories(c *VeleroV1Client, namespace string) *resticRepositories { - return &resticRepositories{ +// newBackupRepositories returns a BackupRepositories +func newBackupRepositories(c *VeleroV1Client, namespace string) *backupRepositories { + return &backupRepositories{ client: c.RESTClient(), ns: namespace, } } -// Get takes name of the resticRepository, and returns the corresponding resticRepository object, and an error if there is any. -func (c *resticRepositories) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ResticRepository, err error) { - result = &v1.ResticRepository{} +// Get takes name of the backupRepository, and returns the corresponding backupRepository object, and an error if there is any. +func (c *backupRepositories) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.BackupRepository, err error) { + result = &v1.BackupRepository{} err = c.client.Get(). Namespace(c.ns). - Resource("resticrepositories"). + Resource("backuprepositories"). Name(name). VersionedParams(&options, scheme.ParameterCodec). Do(ctx). @@ -77,16 +77,16 @@ func (c *resticRepositories) Get(ctx context.Context, name string, options metav return } -// List takes label and field selectors, and returns the list of ResticRepositories that match those selectors. 
-func (c *resticRepositories) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ResticRepositoryList, err error) { +// List takes label and field selectors, and returns the list of BackupRepositories that match those selectors. +func (c *backupRepositories) List(ctx context.Context, opts metav1.ListOptions) (result *v1.BackupRepositoryList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1.ResticRepositoryList{} + result = &v1.BackupRepositoryList{} err = c.client.Get(). Namespace(c.ns). - Resource("resticrepositories"). + Resource("backuprepositories"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). Do(ctx). @@ -94,8 +94,8 @@ func (c *resticRepositories) List(ctx context.Context, opts metav1.ListOptions) return } -// Watch returns a watch.Interface that watches the requested resticRepositories. -func (c *resticRepositories) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { +// Watch returns a watch.Interface that watches the requested backupRepositories. +func (c *backupRepositories) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -103,34 +103,34 @@ func (c *resticRepositories) Watch(ctx context.Context, opts metav1.ListOptions) opts.Watch = true return c.client.Get(). Namespace(c.ns). - Resource("resticrepositories"). + Resource("backuprepositories"). VersionedParams(&opts, scheme.ParameterCodec). Timeout(timeout). Watch(ctx) } -// Create takes the representation of a resticRepository and creates it. Returns the server's representation of the resticRepository, and an error, if there is any. 
-func (c *resticRepositories) Create(ctx context.Context, resticRepository *v1.ResticRepository, opts metav1.CreateOptions) (result *v1.ResticRepository, err error) { - result = &v1.ResticRepository{} +// Create takes the representation of a backupRepository and creates it. Returns the server's representation of the backupRepository, and an error, if there is any. +func (c *backupRepositories) Create(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.CreateOptions) (result *v1.BackupRepository, err error) { + result = &v1.BackupRepository{} err = c.client.Post(). Namespace(c.ns). - Resource("resticrepositories"). + Resource("backuprepositories"). VersionedParams(&opts, scheme.ParameterCodec). - Body(resticRepository). + Body(backupRepository). Do(ctx). Into(result) return } -// Update takes the representation of a resticRepository and updates it. Returns the server's representation of the resticRepository, and an error, if there is any. -func (c *resticRepositories) Update(ctx context.Context, resticRepository *v1.ResticRepository, opts metav1.UpdateOptions) (result *v1.ResticRepository, err error) { - result = &v1.ResticRepository{} +// Update takes the representation of a backupRepository and updates it. Returns the server's representation of the backupRepository, and an error, if there is any. +func (c *backupRepositories) Update(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.UpdateOptions) (result *v1.BackupRepository, err error) { + result = &v1.BackupRepository{} err = c.client.Put(). Namespace(c.ns). - Resource("resticrepositories"). - Name(resticRepository.Name). + Resource("backuprepositories"). + Name(backupRepository.Name). VersionedParams(&opts, scheme.ParameterCodec). - Body(resticRepository). + Body(backupRepository). Do(ctx). 
Into(result) return @@ -138,25 +138,25 @@ func (c *resticRepositories) Update(ctx context.Context, resticRepository *v1.Re // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *resticRepositories) UpdateStatus(ctx context.Context, resticRepository *v1.ResticRepository, opts metav1.UpdateOptions) (result *v1.ResticRepository, err error) { - result = &v1.ResticRepository{} +func (c *backupRepositories) UpdateStatus(ctx context.Context, backupRepository *v1.BackupRepository, opts metav1.UpdateOptions) (result *v1.BackupRepository, err error) { + result = &v1.BackupRepository{} err = c.client.Put(). Namespace(c.ns). - Resource("resticrepositories"). - Name(resticRepository.Name). + Resource("backuprepositories"). + Name(backupRepository.Name). SubResource("status"). VersionedParams(&opts, scheme.ParameterCodec). - Body(resticRepository). + Body(backupRepository). Do(ctx). Into(result) return } -// Delete takes name of the resticRepository and deletes it. Returns an error if one occurs. -func (c *resticRepositories) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { +// Delete takes name of the backupRepository and deletes it. Returns an error if one occurs. +func (c *backupRepositories) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). - Resource("resticrepositories"). + Resource("backuprepositories"). Name(name). Body(&opts). Do(ctx). @@ -164,14 +164,14 @@ func (c *resticRepositories) Delete(ctx context.Context, name string, opts metav } // DeleteCollection deletes a collection of objects. 
-func (c *resticRepositories) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { +func (c *backupRepositories) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { var timeout time.Duration if listOpts.TimeoutSeconds != nil { timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second } return c.client.Delete(). Namespace(c.ns). - Resource("resticrepositories"). + Resource("backuprepositories"). VersionedParams(&listOpts, scheme.ParameterCodec). Timeout(timeout). Body(&opts). @@ -179,12 +179,12 @@ func (c *resticRepositories) DeleteCollection(ctx context.Context, opts metav1.D Error() } -// Patch applies the patch and returns the patched resticRepository. -func (c *resticRepositories) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ResticRepository, err error) { - result = &v1.ResticRepository{} +// Patch applies the patch and returns the patched backupRepository. +func (c *backupRepositories) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.BackupRepository, err error) { + result = &v1.BackupRepository{} err = c.client.Patch(pt). Namespace(c.ns). - Resource("resticrepositories"). + Resource("backuprepositories"). Name(name). SubResource(subresources...). VersionedParams(&opts, scheme.ParameterCodec). diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_backuprepository.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_backuprepository.go new file mode 100644 index 000000000..ef9d6b41c --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_backuprepository.go @@ -0,0 +1,142 @@ +/* +Copyright the Velero contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeBackupRepositories implements BackupRepositoryInterface +type FakeBackupRepositories struct { + Fake *FakeVeleroV1 + ns string +} + +var backuprepositoriesResource = schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "backuprepositories"} + +var backuprepositoriesKind = schema.GroupVersionKind{Group: "velero.io", Version: "v1", Kind: "BackupRepository"} + +// Get takes name of the backupRepository, and returns the corresponding backupRepository object, and an error if there is any. +func (c *FakeBackupRepositories) Get(ctx context.Context, name string, options v1.GetOptions) (result *velerov1.BackupRepository, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(backuprepositoriesResource, c.ns, name), &velerov1.BackupRepository{}) + + if obj == nil { + return nil, err + } + return obj.(*velerov1.BackupRepository), err +} + +// List takes label and field selectors, and returns the list of BackupRepositories that match those selectors. 
+func (c *FakeBackupRepositories) List(ctx context.Context, opts v1.ListOptions) (result *velerov1.BackupRepositoryList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(backuprepositoriesResource, backuprepositoriesKind, c.ns, opts), &velerov1.BackupRepositoryList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &velerov1.BackupRepositoryList{ListMeta: obj.(*velerov1.BackupRepositoryList).ListMeta} + for _, item := range obj.(*velerov1.BackupRepositoryList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested backupRepositories. +func (c *FakeBackupRepositories) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(backuprepositoriesResource, c.ns, opts)) + +} + +// Create takes the representation of a backupRepository and creates it. Returns the server's representation of the backupRepository, and an error, if there is any. +func (c *FakeBackupRepositories) Create(ctx context.Context, backupRepository *velerov1.BackupRepository, opts v1.CreateOptions) (result *velerov1.BackupRepository, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(backuprepositoriesResource, c.ns, backupRepository), &velerov1.BackupRepository{}) + + if obj == nil { + return nil, err + } + return obj.(*velerov1.BackupRepository), err +} + +// Update takes the representation of a backupRepository and updates it. Returns the server's representation of the backupRepository, and an error, if there is any. +func (c *FakeBackupRepositories) Update(ctx context.Context, backupRepository *velerov1.BackupRepository, opts v1.UpdateOptions) (result *velerov1.BackupRepository, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateAction(backuprepositoriesResource, c.ns, backupRepository), &velerov1.BackupRepository{}) + + if obj == nil { + return nil, err + } + return obj.(*velerov1.BackupRepository), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeBackupRepositories) UpdateStatus(ctx context.Context, backupRepository *velerov1.BackupRepository, opts v1.UpdateOptions) (*velerov1.BackupRepository, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(backuprepositoriesResource, "status", c.ns, backupRepository), &velerov1.BackupRepository{}) + + if obj == nil { + return nil, err + } + return obj.(*velerov1.BackupRepository), err +} + +// Delete takes name of the backupRepository and deletes it. Returns an error if one occurs. +func (c *FakeBackupRepositories) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(backuprepositoriesResource, c.ns, name), &velerov1.BackupRepository{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeBackupRepositories) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(backuprepositoriesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &velerov1.BackupRepositoryList{}) + return err +} + +// Patch applies the patch and returns the patched backupRepository. +func (c *FakeBackupRepositories) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *velerov1.BackupRepository, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(backuprepositoriesResource, c.ns, name, pt, data, subresources...), &velerov1.BackupRepository{}) + + if obj == nil { + return nil, err + } + return obj.(*velerov1.BackupRepository), err +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_resticrepository.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_resticrepository.go deleted file mode 100644 index aeda0c9cb..000000000 --- a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_resticrepository.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - - velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeResticRepositories implements ResticRepositoryInterface -type FakeResticRepositories struct { - Fake *FakeVeleroV1 - ns string -} - -var resticrepositoriesResource = schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "resticrepositories"} - -var resticrepositoriesKind = schema.GroupVersionKind{Group: "velero.io", Version: "v1", Kind: "ResticRepository"} - -// Get takes name of the resticRepository, and returns the corresponding resticRepository object, and an error if there is any. -func (c *FakeResticRepositories) Get(ctx context.Context, name string, options v1.GetOptions) (result *velerov1.ResticRepository, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(resticrepositoriesResource, c.ns, name), &velerov1.ResticRepository{}) - - if obj == nil { - return nil, err - } - return obj.(*velerov1.ResticRepository), err -} - -// List takes label and field selectors, and returns the list of ResticRepositories that match those selectors. -func (c *FakeResticRepositories) List(ctx context.Context, opts v1.ListOptions) (result *velerov1.ResticRepositoryList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(resticrepositoriesResource, resticrepositoriesKind, c.ns, opts), &velerov1.ResticRepositoryList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &velerov1.ResticRepositoryList{ListMeta: obj.(*velerov1.ResticRepositoryList).ListMeta} - for _, item := range obj.(*velerov1.ResticRepositoryList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested resticRepositories. -func (c *FakeResticRepositories) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(resticrepositoriesResource, c.ns, opts)) - -} - -// Create takes the representation of a resticRepository and creates it. Returns the server's representation of the resticRepository, and an error, if there is any. -func (c *FakeResticRepositories) Create(ctx context.Context, resticRepository *velerov1.ResticRepository, opts v1.CreateOptions) (result *velerov1.ResticRepository, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(resticrepositoriesResource, c.ns, resticRepository), &velerov1.ResticRepository{}) - - if obj == nil { - return nil, err - } - return obj.(*velerov1.ResticRepository), err -} - -// Update takes the representation of a resticRepository and updates it. Returns the server's representation of the resticRepository, and an error, if there is any. -func (c *FakeResticRepositories) Update(ctx context.Context, resticRepository *velerov1.ResticRepository, opts v1.UpdateOptions) (result *velerov1.ResticRepository, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(resticrepositoriesResource, c.ns, resticRepository), &velerov1.ResticRepository{}) - - if obj == nil { - return nil, err - } - return obj.(*velerov1.ResticRepository), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeResticRepositories) UpdateStatus(ctx context.Context, resticRepository *velerov1.ResticRepository, opts v1.UpdateOptions) (*velerov1.ResticRepository, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(resticrepositoriesResource, "status", c.ns, resticRepository), &velerov1.ResticRepository{}) - - if obj == nil { - return nil, err - } - return obj.(*velerov1.ResticRepository), err -} - -// Delete takes name of the resticRepository and deletes it. Returns an error if one occurs. -func (c *FakeResticRepositories) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(resticrepositoriesResource, c.ns, name), &velerov1.ResticRepository{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeResticRepositories) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(resticrepositoriesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &velerov1.ResticRepositoryList{}) - return err -} - -// Patch applies the patch and returns the patched resticRepository. -func (c *FakeResticRepositories) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *velerov1.ResticRepository, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(resticrepositoriesResource, c.ns, name, pt, data, subresources...), &velerov1.ResticRepository{}) - - if obj == nil { - return nil, err - } - return obj.(*velerov1.ResticRepository), err -} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_velero_client.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_velero_client.go index e90115813..444c1f89f 100644 --- a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_velero_client.go +++ b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_velero_client.go @@ -32,6 +32,10 @@ func (c *FakeVeleroV1) Backups(namespace string) v1.BackupInterface { return &FakeBackups{c, namespace} } +func (c *FakeVeleroV1) BackupRepositories(namespace string) v1.BackupRepositoryInterface { + return &FakeBackupRepositories{c, namespace} +} + func (c *FakeVeleroV1) BackupStorageLocations(namespace string) v1.BackupStorageLocationInterface { return &FakeBackupStorageLocations{c, namespace} } @@ -52,10 +56,6 @@ func (c *FakeVeleroV1) PodVolumeRestores(namespace string) v1.PodVolumeRestoreIn return &FakePodVolumeRestores{c, namespace} } -func (c *FakeVeleroV1) ResticRepositories(namespace string) v1.ResticRepositoryInterface { - return &FakeResticRepositories{c, namespace} -} - func (c *FakeVeleroV1) Restores(namespace string) v1.RestoreInterface { return &FakeRestores{c, namespace} } diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/generated_expansion.go b/pkg/generated/clientset/versioned/typed/velero/v1/generated_expansion.go index 5deaaa51a..5032fd6a4 100644 --- a/pkg/generated/clientset/versioned/typed/velero/v1/generated_expansion.go +++ b/pkg/generated/clientset/versioned/typed/velero/v1/generated_expansion.go @@ -20,6 +20,8 @@ package v1 type BackupExpansion interface{} +type BackupRepositoryExpansion interface{} + type BackupStorageLocationExpansion interface{} type DeleteBackupRequestExpansion interface{} @@ -30,8 +32,6 @@ type 
PodVolumeBackupExpansion interface{} type PodVolumeRestoreExpansion interface{} -type ResticRepositoryExpansion interface{} - type RestoreExpansion interface{} type ScheduleExpansion interface{} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/velero_client.go b/pkg/generated/clientset/versioned/typed/velero/v1/velero_client.go index 5758967ef..39f85628c 100644 --- a/pkg/generated/clientset/versioned/typed/velero/v1/velero_client.go +++ b/pkg/generated/clientset/versioned/typed/velero/v1/velero_client.go @@ -27,12 +27,12 @@ import ( type VeleroV1Interface interface { RESTClient() rest.Interface BackupsGetter + BackupRepositoriesGetter BackupStorageLocationsGetter DeleteBackupRequestsGetter DownloadRequestsGetter PodVolumeBackupsGetter PodVolumeRestoresGetter - ResticRepositoriesGetter RestoresGetter SchedulesGetter ServerStatusRequestsGetter @@ -48,6 +48,10 @@ func (c *VeleroV1Client) Backups(namespace string) BackupInterface { return newBackups(c, namespace) } +func (c *VeleroV1Client) BackupRepositories(namespace string) BackupRepositoryInterface { + return newBackupRepositories(c, namespace) +} + func (c *VeleroV1Client) BackupStorageLocations(namespace string) BackupStorageLocationInterface { return newBackupStorageLocations(c, namespace) } @@ -68,10 +72,6 @@ func (c *VeleroV1Client) PodVolumeRestores(namespace string) PodVolumeRestoreInt return newPodVolumeRestores(c, namespace) } -func (c *VeleroV1Client) ResticRepositories(namespace string) ResticRepositoryInterface { - return newResticRepositories(c, namespace) -} - func (c *VeleroV1Client) Restores(namespace string) RestoreInterface { return newRestores(c, namespace) } diff --git a/pkg/generated/informers/externalversions/generic.go b/pkg/generated/informers/externalversions/generic.go index 15770dceb..605887024 100644 --- a/pkg/generated/informers/externalversions/generic.go +++ b/pkg/generated/informers/externalversions/generic.go @@ -55,6 +55,8 @@ func (f *sharedInformerFactory) 
ForResource(resource schema.GroupVersionResource // Group=velero.io, Version=v1 case v1.SchemeGroupVersion.WithResource("backups"): return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().Backups().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("backuprepositories"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().BackupRepositories().Informer()}, nil case v1.SchemeGroupVersion.WithResource("backupstoragelocations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().BackupStorageLocations().Informer()}, nil case v1.SchemeGroupVersion.WithResource("deletebackuprequests"): @@ -65,8 +67,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().PodVolumeBackups().Informer()}, nil case v1.SchemeGroupVersion.WithResource("podvolumerestores"): return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().PodVolumeRestores().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("resticrepositories"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().ResticRepositories().Informer()}, nil case v1.SchemeGroupVersion.WithResource("restores"): return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().Restores().Informer()}, nil case v1.SchemeGroupVersion.WithResource("schedules"): diff --git a/pkg/generated/informers/externalversions/velero/v1/resticrepository.go b/pkg/generated/informers/externalversions/velero/v1/backuprepository.go similarity index 70% rename from pkg/generated/informers/externalversions/velero/v1/resticrepository.go rename to pkg/generated/informers/externalversions/velero/v1/backuprepository.go index f92565554..59865c894 100644 --- a/pkg/generated/informers/externalversions/velero/v1/resticrepository.go +++ 
b/pkg/generated/informers/externalversions/velero/v1/backuprepository.go @@ -32,59 +32,59 @@ import ( cache "k8s.io/client-go/tools/cache" ) -// ResticRepositoryInformer provides access to a shared informer and lister for -// ResticRepositories. -type ResticRepositoryInformer interface { +// BackupRepositoryInformer provides access to a shared informer and lister for +// BackupRepositories. +type BackupRepositoryInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.ResticRepositoryLister + Lister() v1.BackupRepositoryLister } -type resticRepositoryInformer struct { +type backupRepositoryInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc namespace string } -// NewResticRepositoryInformer constructs a new informer for ResticRepository type. +// NewBackupRepositoryInformer constructs a new informer for BackupRepository type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewResticRepositoryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredResticRepositoryInformer(client, namespace, resyncPeriod, indexers, nil) +func NewBackupRepositoryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBackupRepositoryInformer(client, namespace, resyncPeriod, indexers, nil) } -// NewFilteredResticRepositoryInformer constructs a new informer for ResticRepository type. +// NewFilteredBackupRepositoryInformer constructs a new informer for BackupRepository type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredResticRepositoryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredBackupRepositoryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.VeleroV1().ResticRepositories(namespace).List(context.TODO(), options) + return client.VeleroV1().BackupRepositories(namespace).List(context.TODO(), options) }, WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.VeleroV1().ResticRepositories(namespace).Watch(context.TODO(), options) + return client.VeleroV1().BackupRepositories(namespace).Watch(context.TODO(), options) }, }, - &velerov1.ResticRepository{}, + &velerov1.BackupRepository{}, resyncPeriod, indexers, ) } -func (f *resticRepositoryInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredResticRepositoryInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +func (f *backupRepositoryInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBackupRepositoryInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -func (f *resticRepositoryInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&velerov1.ResticRepository{}, f.defaultInformer) +func (f 
*backupRepositoryInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velerov1.BackupRepository{}, f.defaultInformer) } -func (f *resticRepositoryInformer) Lister() v1.ResticRepositoryLister { - return v1.NewResticRepositoryLister(f.Informer().GetIndexer()) +func (f *backupRepositoryInformer) Lister() v1.BackupRepositoryLister { + return v1.NewBackupRepositoryLister(f.Informer().GetIndexer()) } diff --git a/pkg/generated/informers/externalversions/velero/v1/interface.go b/pkg/generated/informers/externalversions/velero/v1/interface.go index 981470c40..087dd3356 100644 --- a/pkg/generated/informers/externalversions/velero/v1/interface.go +++ b/pkg/generated/informers/externalversions/velero/v1/interface.go @@ -26,6 +26,8 @@ import ( type Interface interface { // Backups returns a BackupInformer. Backups() BackupInformer + // BackupRepositories returns a BackupRepositoryInformer. + BackupRepositories() BackupRepositoryInformer // BackupStorageLocations returns a BackupStorageLocationInformer. BackupStorageLocations() BackupStorageLocationInformer // DeleteBackupRequests returns a DeleteBackupRequestInformer. @@ -36,8 +38,6 @@ type Interface interface { PodVolumeBackups() PodVolumeBackupInformer // PodVolumeRestores returns a PodVolumeRestoreInformer. PodVolumeRestores() PodVolumeRestoreInformer - // ResticRepositories returns a ResticRepositoryInformer. - ResticRepositories() ResticRepositoryInformer // Restores returns a RestoreInformer. Restores() RestoreInformer // Schedules returns a ScheduleInformer. @@ -64,6 +64,11 @@ func (v *version) Backups() BackupInformer { return &backupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } +// BackupRepositories returns a BackupRepositoryInformer. 
+func (v *version) BackupRepositories() BackupRepositoryInformer { + return &backupRepositoryInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // BackupStorageLocations returns a BackupStorageLocationInformer. func (v *version) BackupStorageLocations() BackupStorageLocationInformer { return &backupStorageLocationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} @@ -89,11 +94,6 @@ func (v *version) PodVolumeRestores() PodVolumeRestoreInformer { return &podVolumeRestoreInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } -// ResticRepositories returns a ResticRepositoryInformer. -func (v *version) ResticRepositories() ResticRepositoryInformer { - return &resticRepositoryInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - // Restores returns a RestoreInformer. func (v *version) Restores() RestoreInformer { return &restoreInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/pkg/generated/listers/velero/v1/backuprepository.go b/pkg/generated/listers/velero/v1/backuprepository.go new file mode 100644 index 000000000..ef619baf1 --- /dev/null +++ b/pkg/generated/listers/velero/v1/backuprepository.go @@ -0,0 +1,99 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// BackupRepositoryLister helps list BackupRepositories. +// All objects returned here must be treated as read-only. +type BackupRepositoryLister interface { + // List lists all BackupRepositories in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.BackupRepository, err error) + // BackupRepositories returns an object that can list and get BackupRepositories. + BackupRepositories(namespace string) BackupRepositoryNamespaceLister + BackupRepositoryListerExpansion +} + +// backupRepositoryLister implements the BackupRepositoryLister interface. +type backupRepositoryLister struct { + indexer cache.Indexer +} + +// NewBackupRepositoryLister returns a new BackupRepositoryLister. +func NewBackupRepositoryLister(indexer cache.Indexer) BackupRepositoryLister { + return &backupRepositoryLister{indexer: indexer} +} + +// List lists all BackupRepositories in the indexer. +func (s *backupRepositoryLister) List(selector labels.Selector) (ret []*v1.BackupRepository, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.BackupRepository)) + }) + return ret, err +} + +// BackupRepositories returns an object that can list and get BackupRepositories. +func (s *backupRepositoryLister) BackupRepositories(namespace string) BackupRepositoryNamespaceLister { + return backupRepositoryNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// BackupRepositoryNamespaceLister helps list and get BackupRepositories. +// All objects returned here must be treated as read-only. +type BackupRepositoryNamespaceLister interface { + // List lists all BackupRepositories in the indexer for a given namespace. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1.BackupRepository, err error) + // Get retrieves the BackupRepository from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.BackupRepository, error) + BackupRepositoryNamespaceListerExpansion +} + +// backupRepositoryNamespaceLister implements the BackupRepositoryNamespaceLister +// interface. +type backupRepositoryNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all BackupRepositories in the indexer for a given namespace. +func (s backupRepositoryNamespaceLister) List(selector labels.Selector) (ret []*v1.BackupRepository, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.BackupRepository)) + }) + return ret, err +} + +// Get retrieves the BackupRepository from the indexer for a given namespace and name. +func (s backupRepositoryNamespaceLister) Get(name string) (*v1.BackupRepository, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("backuprepository"), name) + } + return obj.(*v1.BackupRepository), nil +} diff --git a/pkg/generated/listers/velero/v1/expansion_generated.go b/pkg/generated/listers/velero/v1/expansion_generated.go index b57656650..c0cd57654 100644 --- a/pkg/generated/listers/velero/v1/expansion_generated.go +++ b/pkg/generated/listers/velero/v1/expansion_generated.go @@ -26,6 +26,14 @@ type BackupListerExpansion interface{} // BackupNamespaceLister. type BackupNamespaceListerExpansion interface{} +// BackupRepositoryListerExpansion allows custom methods to be added to +// BackupRepositoryLister. +type BackupRepositoryListerExpansion interface{} + +// BackupRepositoryNamespaceListerExpansion allows custom methods to be added to +// BackupRepositoryNamespaceLister. 
+type BackupRepositoryNamespaceListerExpansion interface{} + // BackupStorageLocationListerExpansion allows custom methods to be added to // BackupStorageLocationLister. type BackupStorageLocationListerExpansion interface{} @@ -66,14 +74,6 @@ type PodVolumeRestoreListerExpansion interface{} // PodVolumeRestoreNamespaceLister. type PodVolumeRestoreNamespaceListerExpansion interface{} -// ResticRepositoryListerExpansion allows custom methods to be added to -// ResticRepositoryLister. -type ResticRepositoryListerExpansion interface{} - -// ResticRepositoryNamespaceListerExpansion allows custom methods to be added to -// ResticRepositoryNamespaceLister. -type ResticRepositoryNamespaceListerExpansion interface{} - // RestoreListerExpansion allows custom methods to be added to // RestoreLister. type RestoreListerExpansion interface{} diff --git a/pkg/generated/listers/velero/v1/resticrepository.go b/pkg/generated/listers/velero/v1/resticrepository.go deleted file mode 100644 index 96bcfdc7c..000000000 --- a/pkg/generated/listers/velero/v1/resticrepository.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ResticRepositoryLister helps list ResticRepositories. 
-// All objects returned here must be treated as read-only. -type ResticRepositoryLister interface { - // List lists all ResticRepositories in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ResticRepository, err error) - // ResticRepositories returns an object that can list and get ResticRepositories. - ResticRepositories(namespace string) ResticRepositoryNamespaceLister - ResticRepositoryListerExpansion -} - -// resticRepositoryLister implements the ResticRepositoryLister interface. -type resticRepositoryLister struct { - indexer cache.Indexer -} - -// NewResticRepositoryLister returns a new ResticRepositoryLister. -func NewResticRepositoryLister(indexer cache.Indexer) ResticRepositoryLister { - return &resticRepositoryLister{indexer: indexer} -} - -// List lists all ResticRepositories in the indexer. -func (s *resticRepositoryLister) List(selector labels.Selector) (ret []*v1.ResticRepository, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ResticRepository)) - }) - return ret, err -} - -// ResticRepositories returns an object that can list and get ResticRepositories. -func (s *resticRepositoryLister) ResticRepositories(namespace string) ResticRepositoryNamespaceLister { - return resticRepositoryNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ResticRepositoryNamespaceLister helps list and get ResticRepositories. -// All objects returned here must be treated as read-only. -type ResticRepositoryNamespaceLister interface { - // List lists all ResticRepositories in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1.ResticRepository, err error) - // Get retrieves the ResticRepository from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. 
- Get(name string) (*v1.ResticRepository, error) - ResticRepositoryNamespaceListerExpansion -} - -// resticRepositoryNamespaceLister implements the ResticRepositoryNamespaceLister -// interface. -type resticRepositoryNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ResticRepositories in the indexer for a given namespace. -func (s resticRepositoryNamespaceLister) List(selector labels.Selector) (ret []*v1.ResticRepository, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.ResticRepository)) - }) - return ret, err -} - -// Get retrieves the ResticRepository from the indexer for a given namespace and name. -func (s resticRepositoryNamespaceLister) Get(name string) (*v1.ResticRepository, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("resticrepository"), name) - } - return obj.(*v1.ResticRepository), nil -} diff --git a/pkg/persistence/object_store.go b/pkg/persistence/object_store.go index 1c0b619e1..acda15323 100644 --- a/pkg/persistence/object_store.go +++ b/pkg/persistence/object_store.go @@ -131,19 +131,25 @@ func (b *objectBackupStoreGetter) Get(location *velerov1api.BackupStorageLocatio return nil, errors.Errorf("backup storage location's bucket name %q must not contain a '/' (if using a prefix, put it in the 'Prefix' field instead)", location.Spec.ObjectStorage.Bucket) } + // Pass a new map into the object store rather than modifying the passed-in + // location. 
This prevents Velero controllers from accidentally modifying + // the in-cluster BSL with data which doesn't belong in Spec.Config + objectStoreConfig := make(map[string]string) + if location.Spec.Config != nil { + for key, val := range location.Spec.Config { + objectStoreConfig[key] = val + } + } + // add the bucket name and prefix to the config map so that object stores // can use them when initializing. The AWS object store uses the bucket // name to determine the bucket's region when setting up its client. - if location.Spec.Config == nil { - location.Spec.Config = make(map[string]string) - } - - location.Spec.Config["bucket"] = bucket - location.Spec.Config["prefix"] = prefix + objectStoreConfig["bucket"] = bucket + objectStoreConfig["prefix"] = prefix // Only include a CACert if it's specified in order to maintain compatibility with plugins that don't expect it. if location.Spec.ObjectStorage.CACert != nil { - location.Spec.Config["caCert"] = string(location.Spec.ObjectStorage.CACert) + objectStoreConfig["caCert"] = string(location.Spec.ObjectStorage.CACert) } // If the BSL specifies a credential, fetch its path on disk and pass to @@ -154,7 +160,7 @@ func (b *objectBackupStoreGetter) Get(location *velerov1api.BackupStorageLocatio return nil, errors.Wrap(err, "unable to get credentials") } - location.Spec.Config["credentialsFile"] = credsFile + objectStoreConfig["credentialsFile"] = credsFile } objectStore, err := objectStoreGetter.GetObjectStore(location.Spec.Provider) @@ -162,7 +168,7 @@ func (b *objectBackupStoreGetter) Get(location *velerov1api.BackupStorageLocatio return nil, err } - if err := objectStore.Init(location.Spec.Config); err != nil { + if err := objectStore.Init(objectStoreConfig); err != nil { return nil, err } diff --git a/pkg/plugin/framework/handle_panic.go b/pkg/plugin/framework/handle_panic.go index 10eb1d2b9..4ea0ec2b5 100644 --- a/pkg/plugin/framework/handle_panic.go +++ b/pkg/plugin/framework/handle_panic.go @@ -17,6 +17,8 @@ 
limitations under the License. package framework import ( + "runtime/debug" + "github.com/pkg/errors" "google.golang.org/grpc/codes" ) @@ -38,7 +40,8 @@ func handlePanic(p interface{}) error { if _, ok := panicErr.(stackTracer); ok { err = panicErr } else { - err = errors.Wrap(panicErr, "plugin panicked") + errWithStacktrace := errors.Errorf("%v, stack trace: %s", panicErr, debug.Stack()) + err = errors.Wrap(errWithStacktrace, "plugin panicked") } } diff --git a/pkg/plugin/framework/server.go b/pkg/plugin/framework/server.go index 066a44db6..b25ef341d 100644 --- a/pkg/plugin/framework/server.go +++ b/pkg/plugin/framework/server.go @@ -25,7 +25,6 @@ import ( "github.com/sirupsen/logrus" "github.com/spf13/pflag" - veleroflag "github.com/vmware-tanzu/velero/pkg/cmd/util/flag" "github.com/vmware-tanzu/velero/pkg/util/logging" ) @@ -78,6 +77,7 @@ type Server interface { // RegisterItemSnapshotters registers multiple Item Snapshotters RegisterItemSnapshotters(map[string]HandlerInitializer) Server + // Server runs the plugin server. Serve() } @@ -87,7 +87,6 @@ type server struct { log *logrus.Logger logLevelFlag *logging.LevelFlag flagSet *pflag.FlagSet - featureSet *veleroflag.StringArray backupItemAction *BackupItemActionPlugin volumeSnapshotter *VolumeSnapshotterPlugin objectStore *ObjectStorePlugin @@ -99,12 +98,10 @@ type server struct { // NewServer returns a new Server func NewServer() Server { log := newLogger() - features := veleroflag.NewStringArray() return &server{ log: log, logLevelFlag: logging.LogLevelFlag(log.Level), - featureSet: &features, backupItemAction: NewBackupItemActionPlugin(serverLogger(log)), volumeSnapshotter: NewVolumeSnapshotterPlugin(serverLogger(log)), objectStore: NewObjectStorePlugin(serverLogger(log)), @@ -116,7 +113,6 @@ func NewServer() Server { func (s *server) BindFlags(flags *pflag.FlagSet) Server { flags.Var(s.logLevelFlag, "log-level", fmt.Sprintf("The level at which to log. 
Valid values are %s.", strings.Join(s.logLevelFlag.AllowedValues(), ", "))) - flags.Var(s.featureSet, "features", "List of feature flags for this plugin") s.flagSet = flags s.flagSet.ParseErrorsWhitelist.UnknownFlags = true diff --git a/pkg/restic/backupper.go b/pkg/podvolume/backupper.go similarity index 88% rename from pkg/restic/backupper.go rename to pkg/podvolume/backupper.go index fd366a36b..116a5c4e7 100644 --- a/pkg/restic/backupper.go +++ b/pkg/podvolume/backupper.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restic +package podvolume import ( "context" @@ -30,7 +30,9 @@ import ( "k8s.io/client-go/tools/cache" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" "github.com/vmware-tanzu/velero/pkg/label" + "github.com/vmware-tanzu/velero/pkg/repository" "github.com/vmware-tanzu/velero/pkg/util/boolptr" ) @@ -41,11 +43,12 @@ type Backupper interface { } type backupper struct { - ctx context.Context - repoManager *repositoryManager - repoEnsurer *repositoryEnsurer - pvcClient corev1client.PersistentVolumeClaimsGetter - pvClient corev1client.PersistentVolumesGetter + ctx context.Context + repoLocker *repository.RepoLocker + repoEnsurer *repository.RepositoryEnsurer + veleroClient clientset.Interface + pvcClient corev1client.PersistentVolumeClaimsGetter + pvClient corev1client.PersistentVolumesGetter results map[string]chan *velerov1api.PodVolumeBackup resultsLock sync.Mutex @@ -53,19 +56,21 @@ type backupper struct { func newBackupper( ctx context.Context, - repoManager *repositoryManager, - repoEnsurer *repositoryEnsurer, + repoLocker *repository.RepoLocker, + repoEnsurer *repository.RepositoryEnsurer, podVolumeBackupInformer cache.SharedIndexInformer, + veleroClient clientset.Interface, pvcClient corev1client.PersistentVolumeClaimsGetter, pvClient corev1client.PersistentVolumesGetter, 
log logrus.FieldLogger, ) *backupper { b := &backupper{ - ctx: ctx, - repoManager: repoManager, - repoEnsurer: repoEnsurer, - pvcClient: pvcClient, - pvClient: pvClient, + ctx: ctx, + repoLocker: repoLocker, + repoEnsurer: repoEnsurer, + veleroClient: veleroClient, + pvcClient: pvcClient, + pvClient: pvClient, results: make(map[string]chan *velerov1api.PodVolumeBackup), } @@ -109,8 +114,8 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. // get a single non-exclusive lock since we'll wait for all individual // backups to be complete before releasing it. - b.repoManager.repoLocker.Lock(repo.Name) - defer b.repoManager.repoLocker.Unlock(repo.Name) + b.repoLocker.Lock(repo.Name) + defer b.repoLocker.Unlock(repo.Name) resultsChan := make(chan *velerov1api.PodVolumeBackup) @@ -177,8 +182,9 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. continue } - volumeBackup := newPodVolumeBackup(backup, pod, volume, repo.Spec.ResticIdentifier, pvc) - if volumeBackup, err = b.repoManager.veleroClient.VeleroV1().PodVolumeBackups(volumeBackup.Namespace).Create(context.TODO(), volumeBackup, metav1.CreateOptions{}); err != nil { + // TODO: Remove the hard-coded uploader type before v1.10 FC + volumeBackup := newPodVolumeBackup(backup, pod, volume, repo.Spec.ResticIdentifier, "restic", pvc) + if volumeBackup, err = b.veleroClient.VeleroV1().PodVolumeBackups(volumeBackup.Namespace).Create(context.TODO(), volumeBackup, metav1.CreateOptions{}); err != nil { errs = append(errs, err) continue } @@ -236,7 +242,7 @@ func isHostPathVolume(volume *corev1api.Volume, pvc *corev1api.PersistentVolumeC return pv.Spec.HostPath != nil, nil } -func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volume corev1api.Volume, repoIdentifier string, pvc *corev1api.PersistentVolumeClaim) *velerov1api.PodVolumeBackup { +func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volume corev1api.Volume, 
repoIdentifier, uploaderType string, pvc *corev1api.PersistentVolumeClaim) *velerov1api.PodVolumeBackup { pvb := &velerov1api.PodVolumeBackup{ ObjectMeta: metav1.ObjectMeta{ Namespace: backup.Namespace, @@ -274,6 +280,7 @@ func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volume c }, BackupStorageLocation: backup.Spec.StorageLocation, RepoIdentifier: repoIdentifier, + UploaderType: uploaderType, }, } diff --git a/pkg/podvolume/backupper_factory.go b/pkg/podvolume/backupper_factory.go new file mode 100644 index 000000000..aaaa5e2ac --- /dev/null +++ b/pkg/podvolume/backupper_factory.go @@ -0,0 +1,88 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podvolume + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + velerov1informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1" + "github.com/vmware-tanzu/velero/pkg/repository" +) + +// BackupperFactory can construct pod volumes backuppers. +type BackupperFactory interface { + // NewBackupper returns a pod volumes backupper for use during a single Velero backup. 
+ NewBackupper(context.Context, *velerov1api.Backup) (Backupper, error) +} + +func NewBackupperFactory(repoLocker *repository.RepoLocker, + repoEnsurer *repository.RepositoryEnsurer, + veleroClient clientset.Interface, + pvcClient corev1client.PersistentVolumeClaimsGetter, + pvClient corev1client.PersistentVolumesGetter, + repoInformerSynced cache.InformerSynced, + log logrus.FieldLogger) BackupperFactory { + return &backupperFactory{ + repoLocker: repoLocker, + repoEnsurer: repoEnsurer, + veleroClient: veleroClient, + pvcClient: pvcClient, + pvClient: pvClient, + repoInformerSynced: repoInformerSynced, + log: log, + } +} + +type backupperFactory struct { + repoLocker *repository.RepoLocker + repoEnsurer *repository.RepositoryEnsurer + veleroClient clientset.Interface + pvcClient corev1client.PersistentVolumeClaimsGetter + pvClient corev1client.PersistentVolumesGetter + repoInformerSynced cache.InformerSynced + log logrus.FieldLogger +} + +func (bf *backupperFactory) NewBackupper(ctx context.Context, backup *velerov1api.Backup) (Backupper, error) { + informer := velerov1informers.NewFilteredPodVolumeBackupInformer( + bf.veleroClient, + backup.Namespace, + 0, + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, + func(opts *metav1.ListOptions) { + opts.LabelSelector = fmt.Sprintf("%s=%s", velerov1api.BackupUIDLabel, backup.UID) + }, + ) + + b := newBackupper(ctx, bf.repoLocker, bf.repoEnsurer, informer, bf.veleroClient, bf.pvcClient, bf.pvClient, bf.log) + + go informer.Run(ctx.Done()) + if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced, bf.repoInformerSynced) { + return nil, errors.New("timed out waiting for caches to sync") + } + + return b, nil +} diff --git a/pkg/restic/backupper_test.go b/pkg/podvolume/backupper_test.go similarity index 99% rename from pkg/restic/backupper_test.go rename to pkg/podvolume/backupper_test.go index 8969f6efa..fb0cacd1a 100644 --- a/pkg/restic/backupper_test.go +++ b/pkg/podvolume/backupper_test.go @@ -14,7 
+14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restic +package podvolume import ( "context" diff --git a/pkg/restic/mocks/restorer.go b/pkg/podvolume/mocks/restorer.go similarity index 60% rename from pkg/restic/mocks/restorer.go rename to pkg/podvolume/mocks/restorer.go index 7f4f5c1d8..fd210aa00 100644 --- a/pkg/restic/mocks/restorer.go +++ b/pkg/podvolume/mocks/restorer.go @@ -2,8 +2,10 @@ package mocks -import mock "github.com/stretchr/testify/mock" -import restic "github.com/vmware-tanzu/velero/pkg/restic" +import ( + mock "github.com/stretchr/testify/mock" + "github.com/vmware-tanzu/velero/pkg/podvolume" +) // Restorer is an autogenerated mock type for the Restorer type type Restorer struct { @@ -11,11 +13,11 @@ type Restorer struct { } // RestorePodVolumes provides a mock function with given fields: _a0 -func (_m *Restorer) RestorePodVolumes(_a0 restic.RestoreData) []error { +func (_m *Restorer) RestorePodVolumes(_a0 podvolume.RestoreData) []error { ret := _m.Called(_a0) var r0 []error - if rf, ok := ret.Get(0).(func(restic.RestoreData) []error); ok { + if rf, ok := ret.Get(0).(func(podvolume.RestoreData) []error); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { diff --git a/pkg/restic/restorer.go b/pkg/podvolume/restorer.go similarity index 84% rename from pkg/restic/restorer.go rename to pkg/podvolume/restorer.go index 242cc717d..daa3a630d 100644 --- a/pkg/restic/restorer.go +++ b/pkg/podvolume/restorer.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package restic +package podvolume import ( "context" @@ -28,7 +28,9 @@ import ( "k8s.io/client-go/tools/cache" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" "github.com/vmware-tanzu/velero/pkg/label" + "github.com/vmware-tanzu/velero/pkg/repository" "github.com/vmware-tanzu/velero/pkg/util/boolptr" ) @@ -46,10 +48,11 @@ type Restorer interface { } type restorer struct { - ctx context.Context - repoManager *repositoryManager - repoEnsurer *repositoryEnsurer - pvcClient corev1client.PersistentVolumeClaimsGetter + ctx context.Context + repoLocker *repository.RepoLocker + repoEnsurer *repository.RepositoryEnsurer + veleroClient clientset.Interface + pvcClient corev1client.PersistentVolumeClaimsGetter resultsLock sync.Mutex results map[string]chan *velerov1api.PodVolumeRestore @@ -57,17 +60,19 @@ type restorer struct { func newRestorer( ctx context.Context, - rm *repositoryManager, - repoEnsurer *repositoryEnsurer, + repoLocker *repository.RepoLocker, + repoEnsurer *repository.RepositoryEnsurer, podVolumeRestoreInformer cache.SharedIndexInformer, + veleroClient clientset.Interface, pvcClient corev1client.PersistentVolumeClaimsGetter, log logrus.FieldLogger, ) *restorer { r := &restorer{ - ctx: ctx, - repoManager: rm, - repoEnsurer: repoEnsurer, - pvcClient: pvcClient, + ctx: ctx, + repoLocker: repoLocker, + repoEnsurer: repoEnsurer, + veleroClient: veleroClient, + pvcClient: pvcClient, results: make(map[string]chan *velerov1api.PodVolumeRestore), } @@ -108,8 +113,8 @@ func (r *restorer) RestorePodVolumes(data RestoreData) []error { // get a single non-exclusive lock since we'll wait for all individual // restores to be complete before releasing it. 
- r.repoManager.repoLocker.Lock(repo.Name) - defer r.repoManager.repoLocker.Unlock(repo.Name) + r.repoLocker.Lock(repo.Name) + defer r.repoLocker.Unlock(repo.Name) resultsChan := make(chan *velerov1api.PodVolumeRestore) @@ -139,10 +144,10 @@ func (r *restorer) RestorePodVolumes(data RestoreData) []error { } } } + // TODO: Remove the hard-coded uploader type before v1.10 FC + volumeRestore := newPodVolumeRestore(data.Restore, data.Pod, data.BackupLocation, volume, snapshot, repo.Spec.ResticIdentifier, "restic", pvc) - volumeRestore := newPodVolumeRestore(data.Restore, data.Pod, data.BackupLocation, volume, snapshot, repo.Spec.ResticIdentifier, pvc) - - if err := errorOnly(r.repoManager.veleroClient.VeleroV1().PodVolumeRestores(volumeRestore.Namespace).Create(context.TODO(), volumeRestore, metav1.CreateOptions{})); err != nil { + if err := errorOnly(r.veleroClient.VeleroV1().PodVolumeRestores(volumeRestore.Namespace).Create(context.TODO(), volumeRestore, metav1.CreateOptions{})); err != nil { errs = append(errs, errors.WithStack(err)) continue } @@ -169,7 +174,7 @@ ForEachVolume: return errs } -func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, backupLocation, volume, snapshot, repoIdentifier string, pvc *corev1api.PersistentVolumeClaim) *velerov1api.PodVolumeRestore { +func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, backupLocation, volume, snapshot, repoIdentifier, uploaderType string, pvc *corev1api.PersistentVolumeClaim) *velerov1api.PodVolumeRestore { pvr := &velerov1api.PodVolumeRestore{ ObjectMeta: metav1.ObjectMeta{ Namespace: restore.Namespace, diff --git a/pkg/podvolume/restorer_factory.go b/pkg/podvolume/restorer_factory.go new file mode 100644 index 000000000..3432f4be3 --- /dev/null +++ b/pkg/podvolume/restorer_factory.go @@ -0,0 +1,85 @@ +/* +Copyright the Velero contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podvolume + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + clientset "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned" + velerov1informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1" + "github.com/vmware-tanzu/velero/pkg/repository" +) + +// RestorerFactory can construct pod volumes restorers. +type RestorerFactory interface { + // NewRestorer returns a pod volumes restorer for use during a single Velero restore. 
+ NewRestorer(context.Context, *velerov1api.Restore) (Restorer, error) +} + +func NewRestorerFactory(repoLocker *repository.RepoLocker, + repoEnsurer *repository.RepositoryEnsurer, + veleroClient clientset.Interface, + pvcClient corev1client.PersistentVolumeClaimsGetter, + repoInformerSynced cache.InformerSynced, + log logrus.FieldLogger) RestorerFactory { + return &restorerFactory{ + repoLocker: repoLocker, + repoEnsurer: repoEnsurer, + veleroClient: veleroClient, + pvcClient: pvcClient, + repoInformerSynced: repoInformerSynced, + log: log, + } +} + +type restorerFactory struct { + repoLocker *repository.RepoLocker + repoEnsurer *repository.RepositoryEnsurer + veleroClient clientset.Interface + pvcClient corev1client.PersistentVolumeClaimsGetter + repoInformerSynced cache.InformerSynced + log logrus.FieldLogger +} + +func (rf *restorerFactory) NewRestorer(ctx context.Context, restore *velerov1api.Restore) (Restorer, error) { + informer := velerov1informers.NewFilteredPodVolumeRestoreInformer( + rf.veleroClient, + restore.Namespace, + 0, + cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, + func(opts *metav1.ListOptions) { + opts.LabelSelector = fmt.Sprintf("%s=%s", velerov1api.RestoreUIDLabel, restore.UID) + }, + ) + + r := newRestorer(ctx, rf.repoLocker, rf.repoEnsurer, informer, rf.veleroClient, rf.pvcClient, rf.log) + + go informer.Run(ctx.Done()) + if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced, rf.repoInformerSynced) { + return nil, errors.New("timed out waiting for cache to sync") + } + + return r, nil +} diff --git a/pkg/podvolume/util.go b/pkg/podvolume/util.go new file mode 100644 index 000000000..57baacc10 --- /dev/null +++ b/pkg/podvolume/util.go @@ -0,0 +1,198 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podvolume + +import ( + "strings" + + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" +) + +const ( + // PVCNameAnnotation is the key for the annotation added to + // pod volume backups when they're for a PVC. + PVCNameAnnotation = "velero.io/pvc-name" + + // Deprecated. + // + // TODO(2.0): remove + podAnnotationPrefix = "snapshot.velero.io/" + + // VolumesToBackupAnnotation is the annotation on a pod whose mounted volumes + // need to be backed up using restic. + VolumesToBackupAnnotation = "backup.velero.io/backup-volumes" + + // VolumesToExcludeAnnotation is the annotation on a pod whose mounted volumes + // should be excluded from restic backup. + VolumesToExcludeAnnotation = "backup.velero.io/backup-volumes-excludes" +) + +// GetVolumeBackupsForPod returns a map, of volume name -> snapshot id, +// of the PodVolumeBackups that exist for the provided pod. +func GetVolumeBackupsForPod(podVolumeBackups []*velerov1api.PodVolumeBackup, pod *corev1api.Pod, sourcePodNs string) map[string]string { + volumes := make(map[string]string) + + for _, pvb := range podVolumeBackups { + if !isPVBMatchPod(pvb, pod.GetName(), sourcePodNs) { + continue + } + + // skip PVBs without a snapshot ID since there's nothing + // to restore (they could be failed, or for empty volumes). + if pvb.Status.SnapshotID == "" { + continue + } + + // If the volume came from a projected or DownwardAPI source, skip its restore. 
+ // This allows backups affected by https://github.com/vmware-tanzu/velero/issues/3863 + // or https://github.com/vmware-tanzu/velero/issues/4053 to be restored successfully. + if volumeHasNonRestorableSource(pvb.Spec.Volume, pod.Spec.Volumes) { + continue + } + + volumes[pvb.Spec.Volume] = pvb.Status.SnapshotID + } + + if len(volumes) > 0 { + return volumes + } + + return getPodSnapshotAnnotations(pod) +} + +func isPVBMatchPod(pvb *velerov1api.PodVolumeBackup, podName string, namespace string) bool { + return podName == pvb.Spec.Pod.Name && namespace == pvb.Spec.Pod.Namespace +} + +// volumeHasNonRestorableSource checks if the given volume exists in the list of podVolumes +// and returns true if the volume's source is not restorable. This is true for volumes with +// a Projected or DownwardAPI source. +func volumeHasNonRestorableSource(volumeName string, podVolumes []corev1api.Volume) bool { + var volume corev1api.Volume + for _, v := range podVolumes { + if v.Name == volumeName { + volume = v + break + } + } + return volume.Projected != nil || volume.DownwardAPI != nil +} + +// getPodSnapshotAnnotations returns a map, of volume name -> snapshot id, +// of all snapshots for this pod. +// TODO(2.0) to remove +// Deprecated: we will stop using pod annotations to record restic snapshot IDs after they're taken, +// therefore we won't need to check if these annotations exist. +func getPodSnapshotAnnotations(obj metav1.Object) map[string]string { + var res map[string]string + + insertSafe := func(k, v string) { + if res == nil { + res = make(map[string]string) + } + res[k] = v + } + + for k, v := range obj.GetAnnotations() { + if strings.HasPrefix(k, podAnnotationPrefix) { + insertSafe(k[len(podAnnotationPrefix):], v) + } + } + + return res +} + +// GetVolumesToBackup returns a list of volume names to backup for +// the provided pod. +// Deprecated: Use GetPodVolumesUsingRestic instead. 
+func GetVolumesToBackup(obj metav1.Object) []string { + annotations := obj.GetAnnotations() + if annotations == nil { + return nil + } + + backupsValue := annotations[VolumesToBackupAnnotation] + if backupsValue == "" { + return nil + } + + return strings.Split(backupsValue, ",") +} + +func getVolumesToExclude(obj metav1.Object) []string { + annotations := obj.GetAnnotations() + if annotations == nil { + return nil + } + + return strings.Split(annotations[VolumesToExcludeAnnotation], ",") +} + +func contains(list []string, k string) bool { + for _, i := range list { + if i == k { + return true + } + } + return false +} + +// GetPodVolumesUsingRestic returns a list of volume names to backup for the provided pod. +func GetPodVolumesUsingRestic(pod *corev1api.Pod, defaultVolumesToRestic bool) []string { + if !defaultVolumesToRestic { + return GetVolumesToBackup(pod) + } + + volsToExclude := getVolumesToExclude(pod) + podVolumes := []string{} + for _, pv := range pod.Spec.Volumes { + // cannot backup hostpath volumes as they are not mounted into /var/lib/kubelet/pods + // and therefore not accessible to the restic daemon set. + if pv.HostPath != nil { + continue + } + // don't backup volumes mounting secrets. Secrets will be backed up separately. + if pv.Secret != nil { + continue + } + // don't backup volumes mounting config maps. Config maps will be backed up separately. + if pv.ConfigMap != nil { + continue + } + // don't backup volumes mounted as projected volumes, all data in those come from kube state. + if pv.Projected != nil { + continue + } + // don't backup DownwardAPI volumes, all data in those come from kube state. + if pv.DownwardAPI != nil { + continue + } + // don't backup volumes that are included in the exclude list. + if contains(volsToExclude, pv.Name) { + continue + } + // don't include volumes that mount the default service account token. 
+ if strings.HasPrefix(pv.Name, "default-token") { + continue + } + podVolumes = append(podVolumes, pv.Name) + } + return podVolumes +} diff --git a/pkg/podvolume/util_test.go b/pkg/podvolume/util_test.go new file mode 100644 index 000000000..88b574668 --- /dev/null +++ b/pkg/podvolume/util_test.go @@ -0,0 +1,563 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podvolume + +import ( + "sort" + "testing" + + "github.com/stretchr/testify/assert" + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/builder" +) + +func TestGetVolumeBackupsForPod(t *testing.T) { + tests := []struct { + name string + podVolumeBackups []*velerov1api.PodVolumeBackup + podVolumes []corev1api.Volume + podAnnotations map[string]string + podName string + sourcePodNs string + expected map[string]string + }{ + { + name: "nil annotations results in no volume backups returned", + podAnnotations: nil, + expected: nil, + }, + { + name: "empty annotations results in no volume backups returned", + podAnnotations: make(map[string]string), + expected: nil, + }, + { + name: "pod annotations with no snapshot annotation prefix results in no volume backups returned", + podAnnotations: map[string]string{"foo": "bar"}, + expected: nil, + }, + { + name: "pod annotation with only snapshot annotation prefix, results in volume backup with empty 
volume key", + podAnnotations: map[string]string{podAnnotationPrefix: "snapshotID"}, + expected: map[string]string{"": "snapshotID"}, + }, + { + name: "pod annotation with snapshot annotation prefix results in volume backup with volume name and snapshot ID", + podAnnotations: map[string]string{podAnnotationPrefix + "volume": "snapshotID"}, + expected: map[string]string{"volume": "snapshotID"}, + }, + { + name: "only pod annotations with snapshot annotation prefix are considered", + podAnnotations: map[string]string{"x": "y", podAnnotationPrefix + "volume1": "snapshot1", podAnnotationPrefix + "volume2": "snapshot2"}, + expected: map[string]string{"volume1": "snapshot1", "volume2": "snapshot2"}, + }, + { + name: "pod annotations are not considered if PVBs are provided", + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(), + builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(), + }, + podName: "TestPod", + sourcePodNs: "TestNS", + podAnnotations: map[string]string{"x": "y", podAnnotationPrefix + "foo": "bar", podAnnotationPrefix + "abc": "123"}, + expected: map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"}, + }, + { + name: "volume backups are returned even if no pod annotations are present", + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(), + builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(), + }, + podName: "TestPod", + sourcePodNs: "TestNS", + expected: map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"}, + }, + { + name: "only volumes from PVBs 
with snapshot IDs are returned", + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(), + builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(), + builder.ForPodVolumeBackup("velero", "pvb-3").PodName("TestPod").PodNamespace("TestNS").Volume("pvbtest3-foo").Result(), + builder.ForPodVolumeBackup("velero", "pvb-4").PodName("TestPod").PodNamespace("TestNS").Volume("pvbtest4-abc").Result(), + }, + podName: "TestPod", + sourcePodNs: "TestNS", + expected: map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"}, + }, + { + name: "only volumes from PVBs for the given pod are returned", + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(), + builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(), + builder.ForPodVolumeBackup("velero", "pvb-3").PodName("TestAnotherPod").SnapshotID("snapshot3").Volume("pvbtest3-xyz").Result(), + }, + podName: "TestPod", + sourcePodNs: "TestNS", + expected: map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"}, + }, + { + name: "only volumes from PVBs which match the pod name and source pod namespace are returned", + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(), + builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestAnotherPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(), + builder.ForPodVolumeBackup("velero", 
"pvb-3").PodName("TestPod").PodNamespace("TestAnotherNS").SnapshotID("snapshot3").Volume("pvbtest3-xyz").Result(), + }, + podName: "TestPod", + sourcePodNs: "TestNS", + expected: map[string]string{"pvbtest1-foo": "snapshot1"}, + }, + { + name: "volumes from PVBs that correspond to a pod volume from a projected source are not returned", + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvb-non-projected").Result(), + builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvb-projected").Result(), + }, + podVolumes: []corev1api.Volume{ + { + Name: "pvb-non-projected", + VolumeSource: corev1api.VolumeSource{ + PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{}, + }, + }, + { + Name: "pvb-projected", + VolumeSource: corev1api.VolumeSource{ + Projected: &corev1api.ProjectedVolumeSource{}, + }, + }, + }, + podName: "TestPod", + sourcePodNs: "TestNS", + expected: map[string]string{"pvb-non-projected": "snapshot1"}, + }, + { + name: "volumes from PVBs that correspond to a pod volume from a DownwardAPI source are not returned", + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvb-non-downwardapi").Result(), + builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvb-downwardapi").Result(), + }, + podVolumes: []corev1api.Volume{ + { + Name: "pvb-non-downwardapi", + VolumeSource: corev1api.VolumeSource{ + PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{}, + }, + }, + { + Name: "pvb-downwardapi", + VolumeSource: corev1api.VolumeSource{ + DownwardAPI: &corev1api.DownwardAPIVolumeSource{}, + }, + }, + }, + podName: "TestPod", + sourcePodNs: "TestNS", + expected: 
map[string]string{"pvb-non-downwardapi": "snapshot1"}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + pod := &corev1api.Pod{} + pod.Annotations = test.podAnnotations + pod.Name = test.podName + pod.Spec.Volumes = test.podVolumes + + res := GetVolumeBackupsForPod(test.podVolumeBackups, pod, test.sourcePodNs) + assert.Equal(t, test.expected, res) + }) + } +} + +func TestVolumeHasNonRestorableSource(t *testing.T) { + testCases := []struct { + name string + volumeName string + podVolumes []corev1api.Volume + expected bool + }{ + { + name: "volume name not in list of volumes", + volumeName: "missing-volume", + podVolumes: []corev1api.Volume{ + { + Name: "restorable", + VolumeSource: corev1api.VolumeSource{ + PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{}, + }, + }, + { + Name: "projected", + VolumeSource: corev1api.VolumeSource{ + Projected: &corev1api.ProjectedVolumeSource{}, + }, + }, + { + Name: "downwardapi", + VolumeSource: corev1api.VolumeSource{ + DownwardAPI: &corev1api.DownwardAPIVolumeSource{}, + }, + }, + }, + expected: false, + }, + { + name: "volume name in list of volumes but not projected or DownwardAPI", + volumeName: "restorable", + podVolumes: []corev1api.Volume{ + { + Name: "restorable", + VolumeSource: corev1api.VolumeSource{ + PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{}, + }, + }, + { + Name: "projected", + VolumeSource: corev1api.VolumeSource{ + Projected: &corev1api.ProjectedVolumeSource{}, + }, + }, + { + Name: "downwardapi", + VolumeSource: corev1api.VolumeSource{ + DownwardAPI: &corev1api.DownwardAPIVolumeSource{}, + }, + }, + }, + expected: false, + }, + { + name: "volume name in list of volumes and projected", + volumeName: "projected", + podVolumes: []corev1api.Volume{ + { + Name: "restorable", + VolumeSource: corev1api.VolumeSource{ + PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{}, + }, + }, + { + Name: "projected", + VolumeSource: 
corev1api.VolumeSource{ + Projected: &corev1api.ProjectedVolumeSource{}, + }, + }, + { + Name: "downwardapi", + VolumeSource: corev1api.VolumeSource{ + DownwardAPI: &corev1api.DownwardAPIVolumeSource{}, + }, + }, + }, + expected: true, + }, + { + name: "volume name in list of volumes and is a DownwardAPI volume", + volumeName: "downwardapi", + podVolumes: []corev1api.Volume{ + { + Name: "restorable", + VolumeSource: corev1api.VolumeSource{ + PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{}, + }, + }, + { + Name: "projected", + VolumeSource: corev1api.VolumeSource{ + Projected: &corev1api.ProjectedVolumeSource{}, + }, + }, + { + Name: "downwardapi", + VolumeSource: corev1api.VolumeSource{ + DownwardAPI: &corev1api.DownwardAPIVolumeSource{}, + }, + }, + }, + expected: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + actual := volumeHasNonRestorableSource(tc.volumeName, tc.podVolumes) + assert.Equal(t, tc.expected, actual) + }) + + } +} + +func TestGetVolumesToBackup(t *testing.T) { + tests := []struct { + name string + annotations map[string]string + expected []string + }{ + { + name: "nil annotations", + annotations: nil, + expected: nil, + }, + { + name: "no volumes to backup", + annotations: map[string]string{"foo": "bar"}, + expected: nil, + }, + { + name: "one volume to backup", + annotations: map[string]string{"foo": "bar", VolumesToBackupAnnotation: "volume-1"}, + expected: []string{"volume-1"}, + }, + { + name: "multiple volumes to backup", + annotations: map[string]string{"foo": "bar", VolumesToBackupAnnotation: "volume-1,volume-2,volume-3"}, + expected: []string{"volume-1", "volume-2", "volume-3"}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + pod := &corev1api.Pod{} + pod.Annotations = test.annotations + + res := GetVolumesToBackup(pod) + + // sort to ensure good compare of slices + sort.Strings(test.expected) + sort.Strings(res) + + assert.Equal(t, 
test.expected, res) + }) + } +} + +func TestGetPodVolumesUsingRestic(t *testing.T) { + testCases := []struct { + name string + pod *corev1api.Pod + expected []string + defaultVolumesToRestic bool + }{ + { + name: "should get PVs from VolumesToBackupAnnotation when defaultVolumesToRestic is false", + defaultVolumesToRestic: false, + pod: &corev1api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + VolumesToBackupAnnotation: "resticPV1,resticPV2,resticPV3", + }, + }, + }, + expected: []string{"resticPV1", "resticPV2", "resticPV3"}, + }, + { + name: "should get all pod volumes when defaultVolumesToRestic is true and no PVs are excluded", + defaultVolumesToRestic: true, + pod: &corev1api.Pod{ + Spec: corev1api.PodSpec{ + Volumes: []corev1api.Volume{ + // Restic Volumes + {Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"}, + }, + }, + }, + expected: []string{"resticPV1", "resticPV2", "resticPV3"}, + }, + { + name: "should get all pod volumes except ones excluded when defaultVolumesToRestic is true", + defaultVolumesToRestic: true, + pod: &corev1api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3", + }, + }, + Spec: corev1api.PodSpec{ + Volumes: []corev1api.Volume{ + // Restic Volumes + {Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"}, + /// Excluded from restic through annotation + {Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"}, + }, + }, + }, + expected: []string{"resticPV1", "resticPV2", "resticPV3"}, + }, + { + name: "should exclude default service account token from restic backup", + defaultVolumesToRestic: true, + pod: &corev1api.Pod{ + Spec: corev1api.PodSpec{ + Volumes: []corev1api.Volume{ + // Restic Volumes + {Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"}, + /// Excluded from restic because colume mounting default service account token + {Name: "default-token-5xq45"}, + }, + }, + }, 
+ expected: []string{"resticPV1", "resticPV2", "resticPV3"}, + }, + { + name: "should exclude host path volumes from restic backups", + defaultVolumesToRestic: true, + pod: &corev1api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3", + }, + }, + Spec: corev1api.PodSpec{ + Volumes: []corev1api.Volume{ + // Restic Volumes + {Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"}, + /// Excluded from restic through annotation + {Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"}, + // Excluded from restic because hostpath + {Name: "hostPath1", VolumeSource: corev1api.VolumeSource{HostPath: &corev1api.HostPathVolumeSource{Path: "/hostpathVol"}}}, + }, + }, + }, + expected: []string{"resticPV1", "resticPV2", "resticPV3"}, + }, + { + name: "should exclude volumes mounting secrets", + defaultVolumesToRestic: true, + pod: &corev1api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3", + }, + }, + Spec: corev1api.PodSpec{ + Volumes: []corev1api.Volume{ + // Restic Volumes + {Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"}, + /// Excluded from restic through annotation + {Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"}, + // Excluded from restic because hostpath + {Name: "superSecret", VolumeSource: corev1api.VolumeSource{Secret: &corev1api.SecretVolumeSource{SecretName: "super-secret"}}}, + }, + }, + }, + expected: []string{"resticPV1", "resticPV2", "resticPV3"}, + }, + { + name: "should exclude volumes mounting config maps", + defaultVolumesToRestic: true, + pod: &corev1api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3", + }, + }, + Spec: corev1api.PodSpec{ + Volumes: []corev1api.Volume{ + // Restic Volumes + {Name: "resticPV1"}, {Name: 
"resticPV2"}, {Name: "resticPV3"}, + /// Excluded from restic through annotation + {Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"}, + // Excluded from restic because hostpath + {Name: "appCOnfig", VolumeSource: corev1api.VolumeSource{ConfigMap: &corev1api.ConfigMapVolumeSource{LocalObjectReference: corev1api.LocalObjectReference{Name: "app-config"}}}}, + }, + }, + }, + expected: []string{"resticPV1", "resticPV2", "resticPV3"}, + }, + { + name: "should exclude projected volumes", + defaultVolumesToRestic: true, + pod: &corev1api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3", + }, + }, + Spec: corev1api.PodSpec{ + Volumes: []corev1api.Volume{ + {Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"}, + { + Name: "projected", + VolumeSource: corev1api.VolumeSource{ + Projected: &corev1api.ProjectedVolumeSource{ + Sources: []corev1api.VolumeProjection{{ + Secret: &corev1api.SecretProjection{ + LocalObjectReference: corev1api.LocalObjectReference{}, + Items: nil, + Optional: nil, + }, + DownwardAPI: nil, + ConfigMap: nil, + ServiceAccountToken: nil, + }}, + DefaultMode: nil, + }, + }, + }, + }, + }, + }, + expected: []string{"resticPV1", "resticPV2", "resticPV3"}, + }, + { + name: "should exclude DownwardAPI volumes", + defaultVolumesToRestic: true, + pod: &corev1api.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3", + }, + }, + Spec: corev1api.PodSpec{ + Volumes: []corev1api.Volume{ + {Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"}, + { + Name: "downwardAPI", + VolumeSource: corev1api.VolumeSource{ + DownwardAPI: &corev1api.DownwardAPIVolumeSource{ + Items: []corev1api.DownwardAPIVolumeFile{ + { + Path: "labels", + FieldRef: &corev1api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.labels", + }, + }, + }, + }, + }, + }, + 
}, + }, + }, + expected: []string{"resticPV1", "resticPV2", "resticPV3"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + actual := GetPodVolumesUsingRestic(tc.pod, tc.defaultVolumesToRestic) + + sort.Strings(tc.expected) + sort.Strings(actual) + assert.Equal(t, tc.expected, actual) + }) + } +} diff --git a/pkg/repository/config/aws.go b/pkg/repository/config/aws.go new file mode 100644 index 000000000..0ff4ca218 --- /dev/null +++ b/pkg/repository/config/aws.go @@ -0,0 +1,99 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + "os" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/pkg/errors" +) + +const ( + // AWS specific environment variable + awsProfileEnvVar = "AWS_PROFILE" + awsProfileKey = "profile" + awsCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE" +) + +// GetS3ResticEnvVars gets the environment variables that restic +// relies on (AWS_PROFILE) based on info in the provided object +// storage location config map. 
+func GetS3ResticEnvVars(config map[string]string) (map[string]string, error) { + result := make(map[string]string) + + if credentialsFile, ok := config[CredentialsFileKey]; ok { + result[awsCredentialsFileEnvVar] = credentialsFile + } + + if profile, ok := config[awsProfileKey]; ok { + result[awsProfileEnvVar] = profile + } + + return result, nil +} + +// GetS3Credentials gets the S3 credential values according to the information +// of the provided config or the system's environment variables +func GetS3Credentials(config map[string]string) (credentials.Value, error) { + credentialsFile := config[CredentialsFileKey] + if credentialsFile == "" { + credentialsFile = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") + } + + if credentialsFile == "" { + return credentials.Value{}, errors.New("missing credential file") + } + + creds := credentials.NewSharedCredentials(credentialsFile, "") + credValue, err := creds.Get() + if err != nil { + return credValue, err + } + + return credValue, nil +} + +// GetAWSBucketRegion returns the AWS region that a bucket is in, or an error +// if the region cannot be determined. 
+func GetAWSBucketRegion(bucket string) (string, error) { + var region string + + sess, err := session.NewSession() + if err != nil { + return "", errors.WithStack(err) + } + + for _, partition := range endpoints.DefaultPartitions() { + for regionHint := range partition.Regions() { + region, _ = s3manager.GetBucketRegion(context.Background(), sess, bucket, regionHint) + + // we only need to try a single region hint per partition, so break after the first + break + } + + if region != "" { + return region, nil + } + } + + return "", errors.New("unable to determine bucket's region") +} diff --git a/pkg/restic/aws_test.go b/pkg/repository/config/aws_test.go similarity index 96% rename from pkg/restic/aws_test.go rename to pkg/repository/config/aws_test.go index 51f3ceb99..bdd3e4fa2 100644 --- a/pkg/restic/aws_test.go +++ b/pkg/repository/config/aws_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restic +package config import ( "testing" @@ -55,7 +55,7 @@ func TestGetS3ResticEnvVars(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - actual, err := getS3ResticEnvVars(tc.config) + actual, err := GetS3ResticEnvVars(tc.config) require.NoError(t, err) diff --git a/pkg/restic/azure.go b/pkg/repository/config/azure.go similarity index 88% rename from pkg/restic/azure.go rename to pkg/repository/config/azure.go index 20324b8e3..8c5871c52 100644 --- a/pkg/restic/azure.go +++ b/pkg/repository/config/azure.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package restic +package config import ( "context" @@ -37,6 +37,7 @@ const ( storageAccountConfigKey = "storageAccount" storageAccountKeyEnvVarConfigKey = "storageAccountKeyEnvVar" subscriptionIDConfigKey = "subscriptionId" + storageDomainConfigKey = "storageDomain" ) // getSubscriptionID gets the subscription ID from the 'config' map if it contains @@ -131,10 +132,10 @@ func mapLookup(data map[string]string) func(string) string { } } -// getAzureResticEnvVars gets the environment variables that restic +// GetAzureResticEnvVars gets the environment variables that restic // relies on (AZURE_ACCOUNT_NAME and AZURE_ACCOUNT_KEY) based // on info in the provided object storage location config map. -func getAzureResticEnvVars(config map[string]string) (map[string]string, error) { +func GetAzureResticEnvVars(config map[string]string) (map[string]string, error) { storageAccountKey, _, err := getStorageAccountKey(config) if err != nil { return nil, err @@ -158,7 +159,7 @@ func credentialsFileFromEnv() string { // selectCredentialsFile selects the Azure credentials file to use, retrieving it // from the given config or falling back to retrieving it from the environment. 
func selectCredentialsFile(config map[string]string) string { - if credentialsFile, ok := config[credentialsFileKey]; ok { + if credentialsFile, ok := config[CredentialsFileKey]; ok { return credentialsFile } @@ -208,3 +209,22 @@ func getRequiredValues(getValue func(string) string, keys ...string) (map[string return results, nil } + +// GetAzureStorageDomain gets the Azure storage domain required by a Azure blob connection, +// if the provided config doean't have the value, get it from system's environment variables +func GetAzureStorageDomain(config map[string]string) string { + if domain, exist := config[storageDomainConfigKey]; exist { + return domain + } else { + return os.Getenv(cloudNameEnvVar) + } +} + +func GetAzureCredentials(config map[string]string) (string, string, error) { + storageAccountKey, _, err := getStorageAccountKey(config) + if err != nil { + return "", "", err + } + + return config[storageAccountConfigKey], storageAccountKey, nil +} diff --git a/pkg/restic/azure_test.go b/pkg/repository/config/azure_test.go similarity index 99% rename from pkg/restic/azure_test.go rename to pkg/repository/config/azure_test.go index acb2f2506..d20ac2e28 100644 --- a/pkg/restic/azure_test.go +++ b/pkg/repository/config/azure_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restic +package config import ( "os" diff --git a/pkg/restic/config.go b/pkg/repository/config/config.go similarity index 74% rename from pkg/restic/config.go rename to pkg/repository/config/config.go index 1600f39fa..d7ed99b69 100644 --- a/pkg/restic/config.go +++ b/pkg/repository/config/config.go @@ -14,17 +14,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package restic +package config import ( - "context" "fmt" "path" "strings" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3/s3manager" "github.com/pkg/errors" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" @@ -37,11 +33,16 @@ const ( AWSBackend BackendType = "velero.io/aws" AzureBackend BackendType = "velero.io/azure" GCPBackend BackendType = "velero.io/gcp" + FSBackend BackendType = "velero.io/fs" + + // CredentialsFileKey is the key within a BSL config that is checked to see if + // the BSL is using its own credentials, rather than those in the environment + CredentialsFileKey = "credentialsFile" ) // this func is assigned to a package-level variable so it can be // replaced when unit-testing -var getAWSBucketRegion = getBucketRegion +var getAWSBucketRegion = GetAWSBucketRegion // getRepoPrefix returns the prefix of the value of the --repo flag for // restic commands, i.e. everything except the "/". 
@@ -55,7 +56,7 @@ func getRepoPrefix(location *velerov1api.BackupStorageLocation) (string, error) prefix = layout.GetResticDir() } - backendType := getBackendType(location.Spec.Provider) + backendType := GetBackendType(location.Spec.Provider) if repoPrefix := location.Spec.Config["resticRepoPrefix"]; repoPrefix != "" { return repoPrefix, nil @@ -89,7 +90,7 @@ func getRepoPrefix(location *velerov1api.BackupStorageLocation) (string, error) return "", errors.New("restic repository prefix (resticRepoPrefix) not specified in backup storage location's config") } -func getBackendType(provider string) BackendType { +func GetBackendType(provider string) BackendType { if !strings.Contains(provider, "/") { provider = "velero.io/" + provider } @@ -97,6 +98,10 @@ func getBackendType(provider string) BackendType { return BackendType(provider) } +func IsBackendTypeValid(backendType BackendType) bool { + return (backendType == AWSBackend || backendType == AzureBackend || backendType == GCPBackend || backendType == FSBackend) +} + // GetRepoIdentifier returns the string to be used as the value of the --repo flag in // restic commands for the given repository. func GetRepoIdentifier(location *velerov1api.BackupStorageLocation, name string) (string, error) { @@ -107,29 +112,3 @@ func GetRepoIdentifier(location *velerov1api.BackupStorageLocation, name string) return fmt.Sprintf("%s/%s", strings.TrimSuffix(prefix, "/"), name), nil } - -// getBucketRegion returns the AWS region that a bucket is in, or an error -// if the region cannot be determined. 
-func getBucketRegion(bucket string) (string, error) { - var region string - - sess, err := session.NewSession() - if err != nil { - return "", errors.WithStack(err) - } - - for _, partition := range endpoints.DefaultPartitions() { - for regionHint := range partition.Regions() { - region, _ = s3manager.GetBucketRegion(context.Background(), sess, bucket, regionHint) - - // we only need to try a single region hint per partition, so break after the first - break - } - - if region != "" { - return region, nil - } - } - - return "", errors.New("unable to determine bucket's region") -} diff --git a/pkg/restic/config_test.go b/pkg/repository/config/config_test.go similarity index 99% rename from pkg/restic/config_test.go rename to pkg/repository/config/config_test.go index 8418d6808..2fa26a193 100644 --- a/pkg/restic/config_test.go +++ b/pkg/repository/config/config_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restic +package config import ( "testing" diff --git a/pkg/restic/gcp.go b/pkg/repository/config/gcp.go similarity index 59% rename from pkg/restic/gcp.go rename to pkg/repository/config/gcp.go index 96d1edfe6..ed9e3ec6a 100644 --- a/pkg/restic/gcp.go +++ b/pkg/repository/config/gcp.go @@ -14,21 +14,33 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restic +package config + +import "os" const ( // GCP specific environment variable gcpCredentialsFileEnvVar = "GOOGLE_APPLICATION_CREDENTIALS" ) -// getGCPResticEnvVars gets the environment variables that restic relies +// GetGCPResticEnvVars gets the environment variables that restic relies // on based on info in the provided object storage location config map. 
-func getGCPResticEnvVars(config map[string]string) (map[string]string, error) { +func GetGCPResticEnvVars(config map[string]string) (map[string]string, error) { result := make(map[string]string) - if credentialsFile, ok := config[credentialsFileKey]; ok { + if credentialsFile, ok := config[CredentialsFileKey]; ok { result[gcpCredentialsFileEnvVar] = credentialsFile } return result, nil } + +// GetGCPCredentials gets the credential file required by a GCP bucket connection, +// if the provided config doean't have the value, get it from system's environment variables +func GetGCPCredentials(config map[string]string) string { + if credentialsFile, ok := config[CredentialsFileKey]; ok { + return credentialsFile + } else { + return os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") + } +} diff --git a/pkg/restic/gcp_test.go b/pkg/repository/config/gcp_test.go similarity index 95% rename from pkg/restic/gcp_test.go rename to pkg/repository/config/gcp_test.go index 37f2bf2c7..cd4411e3b 100644 --- a/pkg/restic/gcp_test.go +++ b/pkg/repository/config/gcp_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package restic +package config import ( "testing" @@ -46,7 +46,7 @@ func TestGetGCPResticEnvVars(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - actual, err := getGCPResticEnvVars(tc.config) + actual, err := GetGCPResticEnvVars(tc.config) require.NoError(t, err) diff --git a/pkg/restic/repository_ensurer.go b/pkg/repository/ensurer.go similarity index 79% rename from pkg/restic/repository_ensurer.go rename to pkg/repository/ensurer.go index f1f4f168a..15aa10701 100644 --- a/pkg/restic/repository_ensurer.go +++ b/pkg/repository/ensurer.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package restic +package repository import ( "context" @@ -35,14 +35,14 @@ import ( "github.com/vmware-tanzu/velero/pkg/label" ) -// repositoryEnsurer ensures that Velero restic repositories are created and ready. -type repositoryEnsurer struct { +// RepositoryEnsurer ensures that backup repositories are created and ready. +type RepositoryEnsurer struct { log logrus.FieldLogger - repoLister velerov1listers.ResticRepositoryLister - repoClient velerov1client.ResticRepositoriesGetter + repoLister velerov1listers.BackupRepositoryLister + repoClient velerov1client.BackupRepositoriesGetter repoChansLock sync.Mutex - repoChans map[string]chan *velerov1api.ResticRepository + repoChans map[string]chan *velerov1api.BackupRepository // repoLocksMu synchronizes reads/writes to the repoLocks map itself // since maps are not threadsafe. @@ -55,20 +55,20 @@ type repoKey struct { backupLocation string } -func newRepositoryEnsurer(repoInformer velerov1informers.ResticRepositoryInformer, repoClient velerov1client.ResticRepositoriesGetter, log logrus.FieldLogger) *repositoryEnsurer { - r := &repositoryEnsurer{ +func NewRepositoryEnsurer(repoInformer velerov1informers.BackupRepositoryInformer, repoClient velerov1client.BackupRepositoriesGetter, log logrus.FieldLogger) *RepositoryEnsurer { + r := &RepositoryEnsurer{ log: log, repoLister: repoInformer.Lister(), repoClient: repoClient, - repoChans: make(map[string]chan *velerov1api.ResticRepository), + repoChans: make(map[string]chan *velerov1api.BackupRepository), repoLocks: make(map[repoKey]*sync.Mutex), } repoInformer.Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ UpdateFunc: func(old, upd interface{}) { - oldObj := old.(*velerov1api.ResticRepository) - newObj := upd.(*velerov1api.ResticRepository) + oldObj := old.(*velerov1api.BackupRepository) + newObj := upd.(*velerov1api.BackupRepository) // we're only interested in phase-changing updates if oldObj.Status.Phase == newObj.Status.Phase { @@ -76,7 +76,7 @@ func 
newRepositoryEnsurer(repoInformer velerov1informers.ResticRepositoryInforme } // we're only interested in updates where the updated object is either Ready or NotReady - if newObj.Status.Phase != velerov1api.ResticRepositoryPhaseReady && newObj.Status.Phase != velerov1api.ResticRepositoryPhaseNotReady { + if newObj.Status.Phase != velerov1api.BackupRepositoryPhaseReady && newObj.Status.Phase != velerov1api.BackupRepositoryPhaseNotReady { return } @@ -105,7 +105,7 @@ func repoLabels(volumeNamespace, backupLocation string) labels.Set { } } -func (r *repositoryEnsurer) EnsureRepo(ctx context.Context, namespace, volumeNamespace, backupLocation string) (*velerov1api.ResticRepository, error) { +func (r *RepositoryEnsurer) EnsureRepo(ctx context.Context, namespace, volumeNamespace, backupLocation string) (*velerov1api.BackupRepository, error) { log := r.log.WithField("volumeNamespace", volumeNamespace).WithField("backupLocation", backupLocation) // It's only safe to have one instance of this method executing concurrently for a @@ -132,7 +132,7 @@ func (r *repositoryEnsurer) EnsureRepo(ctx context.Context, namespace, volumeNam selector := labels.SelectorFromSet(repoLabels(volumeNamespace, backupLocation)) - repos, err := r.repoLister.ResticRepositories(namespace).List(selector) + repos, err := r.repoLister.BackupRepositories(namespace).List(selector) if err != nil { return nil, errors.WithStack(err) } @@ -140,7 +140,7 @@ func (r *repositoryEnsurer) EnsureRepo(ctx context.Context, namespace, volumeNam return nil, errors.Errorf("more than one ResticRepository found for workload namespace %q, backup storage location %q", volumeNamespace, backupLocation) } if len(repos) == 1 { - if repos[0].Status.Phase != velerov1api.ResticRepositoryPhaseReady { + if repos[0].Status.Phase != velerov1api.BackupRepositoryPhaseReady { return nil, errors.Errorf("restic repository is not ready: %s", repos[0].Status.Message) } @@ -151,13 +151,13 @@ func (r *repositoryEnsurer) EnsureRepo(ctx 
context.Context, namespace, volumeNam log.Debug("No repository found, creating one") // no repo found: create one and wait for it to be ready - repo := &velerov1api.ResticRepository{ + repo := &velerov1api.BackupRepository{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, GenerateName: fmt.Sprintf("%s-%s-", volumeNamespace, backupLocation), Labels: repoLabels(volumeNamespace, backupLocation), }, - Spec: velerov1api.ResticRepositorySpec{ + Spec: velerov1api.BackupRepositorySpec{ VolumeNamespace: volumeNamespace, BackupStorageLocation: backupLocation, }, @@ -169,7 +169,7 @@ func (r *repositoryEnsurer) EnsureRepo(ctx context.Context, namespace, volumeNam close(repoChan) }() - if _, err := r.repoClient.ResticRepositories(namespace).Create(context.TODO(), repo, metav1.CreateOptions{}); err != nil { + if _, err := r.repoClient.BackupRepositories(namespace).Create(context.TODO(), repo, metav1.CreateOptions{}); err != nil { return nil, errors.Wrapf(err, "unable to create restic repository resource") } @@ -181,7 +181,8 @@ func (r *repositoryEnsurer) EnsureRepo(ctx context.Context, namespace, volumeNam case <-ctx.Done(): return nil, errors.New("timed out waiting for restic repository to become ready") case res := <-repoChan: - if res.Status.Phase == velerov1api.ResticRepositoryPhaseNotReady { + + if res.Status.Phase == velerov1api.BackupRepositoryPhaseNotReady { return nil, errors.Errorf("restic repository is not ready: %s", res.Status.Message) } @@ -189,15 +190,15 @@ func (r *repositoryEnsurer) EnsureRepo(ctx context.Context, namespace, volumeNam } } -func (r *repositoryEnsurer) getRepoChan(name string) chan *velerov1api.ResticRepository { +func (r *RepositoryEnsurer) getRepoChan(name string) chan *velerov1api.BackupRepository { r.repoChansLock.Lock() defer r.repoChansLock.Unlock() - r.repoChans[name] = make(chan *velerov1api.ResticRepository) + r.repoChans[name] = make(chan *velerov1api.BackupRepository) return r.repoChans[name] } -func (r *repositoryEnsurer) 
repoLock(volumeNamespace, backupLocation string) *sync.Mutex { +func (r *RepositoryEnsurer) repoLock(volumeNamespace, backupLocation string) *sync.Mutex { r.repoLocksMu.Lock() defer r.repoLocksMu.Unlock() diff --git a/pkg/repository/keys/keys.go b/pkg/repository/keys/keys.go new file mode 100644 index 000000000..3da876e28 --- /dev/null +++ b/pkg/repository/keys/keys.go @@ -0,0 +1,75 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package keys + +import ( + "context" + + "github.com/pkg/errors" + corev1api "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + + "github.com/vmware-tanzu/velero/pkg/builder" +) + +const ( + credentialsSecretName = "velero-restic-credentials" + credentialsKey = "repository-password" + + encryptionKey = "static-passw0rd" +) + +func EnsureCommonRepositoryKey(secretClient corev1client.SecretsGetter, namespace string) error { + _, err := secretClient.Secrets(namespace).Get(context.TODO(), credentialsSecretName, metav1.GetOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + return errors.WithStack(err) + } + if err == nil { + return nil + } + + // if we got here, we got an IsNotFound error, so we need to create the key + + secret := &corev1api.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: credentialsSecretName, + }, + Type: corev1api.SecretTypeOpaque, + Data: 
map[string][]byte{ + credentialsKey: []byte(encryptionKey), + }, + } + + if _, err = secretClient.Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + return errors.Wrapf(err, "error creating %s secret", credentialsSecretName) + } + + return nil +} + +// RepoKeySelector returns the SecretKeySelector which can be used to fetch +// the restic repository key. +func RepoKeySelector() *corev1api.SecretKeySelector { + // For now, all restic repos share the same key so we don't need the repoName to fetch it. + // When we move to full-backup encryption, we'll likely have a separate key per restic repo + // (all within the Velero server's namespace) so RepoKeySelector will need to select the key + // for that repo. + return builder.ForSecretKeySelector(credentialsSecretName, credentialsKey).Result() +} diff --git a/pkg/repository/keys/keys_test.go b/pkg/repository/keys/keys_test.go new file mode 100644 index 000000000..3102b58c8 --- /dev/null +++ b/pkg/repository/keys/keys_test.go @@ -0,0 +1,30 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package keys + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRepoKeySelector(t *testing.T) { + selector := RepoKeySelector() + + require.Equal(t, credentialsSecretName, selector.Name) + require.Equal(t, credentialsKey, selector.Key) +} diff --git a/pkg/restic/repo_locker.go b/pkg/repository/locker.go similarity index 79% rename from pkg/restic/repo_locker.go rename to pkg/repository/locker.go index 29434753e..20eea9635 100644 --- a/pkg/restic/repo_locker.go +++ b/pkg/repository/locker.go @@ -13,23 +13,24 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -package restic + +package repository import "sync" -// repoLocker manages exclusive/non-exclusive locks for +// RepoLocker manages exclusive/non-exclusive locks for // operations against restic repositories. The semantics // of exclusive/non-exclusive locks are the same as for // a sync.RWMutex, where a non-exclusive lock is equivalent // to a read lock, and an exclusive lock is equivalent to // a write lock. -type repoLocker struct { +type RepoLocker struct { mu sync.Mutex locks map[string]*sync.RWMutex } -func newRepoLocker() *repoLocker { - return &repoLocker{ +func NewRepoLocker() *RepoLocker { + return &RepoLocker{ locks: make(map[string]*sync.RWMutex), } } @@ -37,28 +38,28 @@ func newRepoLocker() *repoLocker { // LockExclusive acquires an exclusive lock for the specified // repository. This function blocks until no other locks exist // for the repo. -func (rl *repoLocker) LockExclusive(name string) { +func (rl *RepoLocker) LockExclusive(name string) { rl.ensureLock(name).Lock() } // Lock acquires a non-exclusive lock for the specified // repository. This function blocks until no exclusive // locks exist for the repo. 
-func (rl *repoLocker) Lock(name string) { +func (rl *RepoLocker) Lock(name string) { rl.ensureLock(name).RLock() } // UnlockExclusive releases an exclusive lock for the repo. -func (rl *repoLocker) UnlockExclusive(name string) { +func (rl *RepoLocker) UnlockExclusive(name string) { rl.ensureLock(name).Unlock() } // Unlock releases a non-exclusive lock for the repo. -func (rl *repoLocker) Unlock(name string) { +func (rl *RepoLocker) Unlock(name string) { rl.ensureLock(name).RUnlock() } -func (rl *repoLocker) ensureLock(name string) *sync.RWMutex { +func (rl *RepoLocker) ensureLock(name string) *sync.RWMutex { rl.mu.Lock() defer rl.mu.Unlock() diff --git a/pkg/repository/provider/provider.go b/pkg/repository/provider/provider.go new file mode 100644 index 000000000..36d69a594 --- /dev/null +++ b/pkg/repository/provider/provider.go @@ -0,0 +1,56 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package provider + +import ( + "context" + + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" +) + +// RepoParam includes the parameters to manipulate a backup repository +// SubDir is used to generate the path in the backup storage +type RepoParam struct { + SubDir string + BackupLocation *velerov1api.BackupStorageLocation +} + +type Provider interface { + //InitRepo is to initialize a repository from a new storage place + InitRepo(ctx context.Context, param RepoParam) error + + //ConnectToRepo is to establish the connection to a + //storage place that a repository is already initialized + ConnectToRepo(ctx context.Context, param RepoParam) error + + //PrepareRepo is a combination of InitRepo and ConnectToRepo, + //it may do initializing + connecting, connecting only if the repository + //is already initialized, or do nothing if the repository is already connected + PrepareRepo(ctx context.Context, param RepoParam) error + + //PruneRepo does a full prune/maintenance of the repository + PruneRepo(ctx context.Context, param RepoParam) error + + //PruneRepoQuick does a quick prune/maintenance of the repository if available + PruneRepoQuick(ctx context.Context, param RepoParam) error + + //EnsureUnlockRepo ensures to remove any stale file locks in the storage + EnsureUnlockRepo(ctx context.Context, param RepoParam) error + + //Forget is to delete a snapshot from the repository + Forget(ctx context.Context, snapshotID string, param RepoParam) error +} diff --git a/pkg/repository/provider/unified_repo.go b/pkg/repository/provider/unified_repo.go new file mode 100644 index 000000000..8ddff5799 --- /dev/null +++ b/pkg/repository/provider/unified_repo.go @@ -0,0 +1,315 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provider + +import ( + "context" + "fmt" + "path" + "strings" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + + "github.com/vmware-tanzu/velero/internal/credentials" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + repoconfig "github.com/vmware-tanzu/velero/pkg/repository/config" + repokey "github.com/vmware-tanzu/velero/pkg/repository/keys" + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" + "github.com/vmware-tanzu/velero/pkg/util/ownership" +) + +type unifiedRepoProvider struct { + credentialGetter credentials.CredentialGetter + workPath string + repoService udmrepo.BackupRepoService + log logrus.FieldLogger +} + +// this func is assigned to a package-level variable so it can be +// replaced when unit-testing +var getAzureCredentials = repoconfig.GetAzureCredentials +var getS3Credentials = repoconfig.GetS3Credentials +var getGCPCredentials = repoconfig.GetGCPCredentials +var getS3BucketRegion = repoconfig.GetAWSBucketRegion +var getAzureStorageDomain = repoconfig.GetAzureStorageDomain + +type localFuncTable struct { + getRepoPassword func(credentials.SecretStore, RepoParam) (string, error) + getStorageVariables func(*velerov1api.BackupStorageLocation, string) (map[string]string, error) + getStorageCredentials func(*velerov1api.BackupStorageLocation, credentials.FileStore) (map[string]string, error) +} + +var funcTable = localFuncTable{ + getRepoPassword: getRepoPassword, + getStorageVariables: getStorageVariables, + getStorageCredentials: getStorageCredentials, +} + +// NewUnifiedRepoProvider creates the service 
provider for Unified Repo +// workPath is the path for Unified Repo to store some local information +// workPath could be empty, if so, the default path will be used +func NewUnifiedRepoProvider( + credentialGetter credentials.CredentialGetter, + workPath string, + log logrus.FieldLogger, +) (Provider, error) { + repo := unifiedRepoProvider{ + credentialGetter: credentialGetter, + workPath: workPath, + log: log, + } + + repo.repoService = createRepoService(log) + + log.Debug("Finished create unified repo service") + + return &repo, nil +} + +func (urp *unifiedRepoProvider) InitRepo(ctx context.Context, param RepoParam) error { + log := urp.log.WithFields(logrus.Fields{ + "BSL name": param.BackupLocation.Name, + "BSL UID": param.BackupLocation.UID, + }) + + log.Debug("Start to init repo") + + repoOption, err := urp.getRepoOption(param) + if err != nil { + return errors.Wrap(err, "error to get repo options") + } + + err = urp.repoService.Init(ctx, repoOption, true) + if err != nil { + return errors.Wrap(err, "error to init backup repo") + } + + log.Debug("Init repo complete") + + return nil +} + +func (urp *unifiedRepoProvider) ConnectToRepo(ctx context.Context, param RepoParam) error { + ///TODO + return nil +} + +func (urp *unifiedRepoProvider) PrepareRepo(ctx context.Context, param RepoParam) error { + ///TODO + return nil +} + +func (urp *unifiedRepoProvider) PruneRepo(ctx context.Context, param RepoParam) error { + ///TODO + return nil +} + +func (urp *unifiedRepoProvider) PruneRepoQuick(ctx context.Context, param RepoParam) error { + ///TODO + return nil +} + +func (urp *unifiedRepoProvider) EnsureUnlockRepo(ctx context.Context, param RepoParam) error { + return nil +} + +func (urp *unifiedRepoProvider) Forget(ctx context.Context, snapshotID string, param RepoParam) error { + ///TODO + return nil +} + +func getRepoPassword(secretStore credentials.SecretStore, param RepoParam) (string, error) { + if secretStore == nil { + return "", errors.New("invalid 
credentials interface") + } + + buf, err := secretStore.Get(repokey.RepoKeySelector()) + if err != nil { + return "", errors.Wrap(err, "error to get password buffer") + } + + return strings.TrimSpace(string(buf)), nil +} + +func (urp *unifiedRepoProvider) getRepoOption(param RepoParam) (udmrepo.RepoOptions, error) { + repoPassword, err := funcTable.getRepoPassword(urp.credentialGetter.FromSecret, param) + if err != nil { + return udmrepo.RepoOptions{}, errors.Wrap(err, "error to get repo password") + } + + storeVar, err := funcTable.getStorageVariables(param.BackupLocation, param.SubDir) + if err != nil { + return udmrepo.RepoOptions{}, errors.Wrap(err, "error to get storage variables") + } + + storeCred, err := funcTable.getStorageCredentials(param.BackupLocation, urp.credentialGetter.FromFile) + if err != nil { + return udmrepo.RepoOptions{}, errors.Wrap(err, "error to get repo credentials") + } + + repoOption := udmrepo.RepoOptions{ + StorageType: getStorageType(param.BackupLocation), + RepoPassword: repoPassword, + ConfigFilePath: getRepoConfigFile(urp.workPath, string(param.BackupLocation.UID)), + Ownership: udmrepo.OwnershipOptions{ + Username: ownership.GetRepositoryOwner().Username, + DomainName: ownership.GetRepositoryOwner().DomainName, + }, + StorageOptions: make(map[string]string), + GeneralOptions: make(map[string]string), + } + + for k, v := range storeVar { + repoOption.StorageOptions[k] = v + } + + for k, v := range storeCred { + repoOption.StorageOptions[k] = v + } + + return repoOption, nil +} + +func getStorageType(backupLocation *velerov1api.BackupStorageLocation) string { + backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider) + + switch backendType { + case repoconfig.AWSBackend: + return udmrepo.StorageTypeS3 + case repoconfig.AzureBackend: + return udmrepo.StorageTypeAzure + case repoconfig.GCPBackend: + return udmrepo.StorageTypeGcs + case repoconfig.FSBackend: + return udmrepo.StorageTypeFs + default: + return "" + } +} + 
+func getStorageCredentials(backupLocation *velerov1api.BackupStorageLocation, credentialsFileStore credentials.FileStore) (map[string]string, error) { + result := make(map[string]string) + var err error + + if credentialsFileStore == nil { + return map[string]string{}, errors.New("invalid credentials interface") + } + + backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider) + if !repoconfig.IsBackendTypeValid(backendType) { + return map[string]string{}, errors.New("invalid storage provider") + } + + config := backupLocation.Spec.Config + if config == nil { + config = map[string]string{} + } + + if backupLocation.Spec.Credential != nil { + config[repoconfig.CredentialsFileKey], err = credentialsFileStore.Path(backupLocation.Spec.Credential) + if err != nil { + return map[string]string{}, errors.Wrap(err, "error get credential file in bsl") + } + } + + switch backendType { + case repoconfig.AWSBackend: + credValue, err := getS3Credentials(config) + if err != nil { + return map[string]string{}, errors.Wrap(err, "error get s3 credentials") + } + result[udmrepo.StoreOptionS3KeyId] = credValue.AccessKeyID + result[udmrepo.StoreOptionS3Provider] = credValue.ProviderName + result[udmrepo.StoreOptionS3SecretKey] = credValue.SecretAccessKey + result[udmrepo.StoreOptionS3Token] = credValue.SessionToken + + case repoconfig.AzureBackend: + storageAccount, accountKey, err := getAzureCredentials(config) + if err != nil { + return map[string]string{}, errors.Wrap(err, "error get azure credentials") + } + result[udmrepo.StoreOptionAzureStorageAccount] = storageAccount + result[udmrepo.StoreOptionAzureKey] = accountKey + + case repoconfig.GCPBackend: + result[udmrepo.StoreOptionCredentialFile] = getGCPCredentials(config) + } + + return result, nil +} + +func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repoName string) (map[string]string, error) { + result := make(map[string]string) + + backendType := 
repoconfig.GetBackendType(backupLocation.Spec.Provider) + if !repoconfig.IsBackendTypeValid(backendType) { + return map[string]string{}, errors.New("invalid storage provider") + } + + config := backupLocation.Spec.Config + if config == nil { + config = map[string]string{} + } + + bucket := strings.Trim(config["bucket"], "/") + prefix := strings.Trim(config["prefix"], "/") + if backupLocation.Spec.ObjectStorage != nil { + bucket = strings.Trim(backupLocation.Spec.ObjectStorage.Bucket, "/") + prefix = strings.Trim(backupLocation.Spec.ObjectStorage.Prefix, "/") + } + + prefix = path.Join(prefix, udmrepo.StoreOptionPrefixName, repoName) + "/" + + region := config["region"] + + if backendType == repoconfig.AWSBackend { + s3Url := config["s3Url"] + + var err error + if s3Url == "" { + region, err = getS3BucketRegion(bucket) + if err != nil { + return map[string]string{}, errors.Wrap(err, "error get s3 bucket region") + } + + s3Url = fmt.Sprintf("s3-%s.amazonaws.com", region) + } + + result[udmrepo.StoreOptionS3Endpoint] = strings.Trim(s3Url, "/") + result[udmrepo.StoreOptionS3DisableTlsVerify] = config["insecureSkipTLSVerify"] + } else if backendType == repoconfig.AzureBackend { + result[udmrepo.StoreOptionAzureDomain] = getAzureStorageDomain(config) + } + + result[udmrepo.StoreOptionOssBucket] = bucket + result[udmrepo.StoreOptionPrefix] = prefix + result[udmrepo.StoreOptionOssRegion] = strings.Trim(region, "/") + result[udmrepo.StoreOptionFsPath] = config["fspath"] + + return result, nil +} + +func getRepoConfigFile(workPath string, repoID string) string { + ///TODO: call udmrepo to get config file + return "" +} + +func createRepoService(log logrus.FieldLogger) udmrepo.BackupRepoService { + ///TODO: call udmrepo create repo service + return nil +} diff --git a/pkg/repository/provider/unified_repo_test.go b/pkg/repository/provider/unified_repo_test.go new file mode 100644 index 000000000..f2cccb8e5 --- /dev/null +++ b/pkg/repository/provider/unified_repo_test.go @@ 
-0,0 +1,555 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package provider + +import ( + "errors" + "testing" + + awscredentials "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + corev1api "k8s.io/api/core/v1" + + velerocredentials "github.com/vmware-tanzu/velero/internal/credentials" + credmock "github.com/vmware-tanzu/velero/internal/credentials/mocks" + velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/repository/udmrepo" +) + +func TestGetStorageCredentials(t *testing.T) { + testCases := []struct { + name string + backupLocation velerov1api.BackupStorageLocation + credFileStore *credmock.FileStore + credStoreError error + credStorePath string + getAzureCredentials func(map[string]string) (string, string, error) + getS3Credentials func(map[string]string) (awscredentials.Value, error) + getGCPCredentials func(map[string]string) string + expected map[string]string + expectedErr string + }{ + { + name: "invalid credentials file store interface", + expected: map[string]string{}, + expectedErr: "invalid credentials interface", + }, + { + name: "invalid provider", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "invalid-provider", + }, + }, + credFileStore: new(credmock.FileStore), + expected: map[string]string{}, 
+ expectedErr: "invalid storage provider", + }, + { + name: "credential section exists in BSL, file store fail", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "aws", + Credential: &corev1api.SecretKeySelector{}, + }, + }, + credFileStore: new(credmock.FileStore), + credStoreError: errors.New("fake error"), + expected: map[string]string{}, + expectedErr: "error get credential file in bsl: fake error", + }, + { + name: "aws, Credential section not exists in BSL", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/aws", + Config: map[string]string{ + "credentialsFile": "credentials-from-config-map", + }, + }, + }, + getS3Credentials: func(config map[string]string) (awscredentials.Value, error) { + return awscredentials.Value{ + AccessKeyID: "from: " + config["credentialsFile"], + }, nil + }, + credFileStore: new(credmock.FileStore), + expected: map[string]string{ + "accessKeyID": "from: credentials-from-config-map", + "providerName": "", + "secretAccessKey": "", + "sessionToken": "", + }, + }, + { + name: "aws, Credential section exists in BSL", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/aws", + Config: map[string]string{ + "credentialsFile": "credentials-from-config-map", + }, + Credential: &corev1api.SecretKeySelector{}, + }, + }, + credFileStore: new(credmock.FileStore), + credStorePath: "credentials-from-credential-key", + getS3Credentials: func(config map[string]string) (awscredentials.Value, error) { + return awscredentials.Value{ + AccessKeyID: "from: " + config["credentialsFile"], + }, nil + }, + + expected: map[string]string{ + "accessKeyID": "from: credentials-from-credential-key", + "providerName": "", + "secretAccessKey": "", + "sessionToken": "", + }, + }, + { + name: "aws, get credentials fail", + backupLocation: 
velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/aws", + Config: map[string]string{ + "credentialsFile": "credentials-from-config-map", + }, + }, + }, + getS3Credentials: func(config map[string]string) (awscredentials.Value, error) { + return awscredentials.Value{}, errors.New("fake error") + }, + credFileStore: new(credmock.FileStore), + expected: map[string]string{}, + expectedErr: "error get s3 credentials: fake error", + }, + { + name: "azure, Credential section exists in BSL", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/azure", + Config: map[string]string{ + "credentialsFile": "credentials-from-config-map", + }, + Credential: &corev1api.SecretKeySelector{}, + }, + }, + credFileStore: new(credmock.FileStore), + credStorePath: "credentials-from-credential-key", + getAzureCredentials: func(config map[string]string) (string, string, error) { + return "storage account from: " + config["credentialsFile"], "", nil + }, + + expected: map[string]string{ + "storageAccount": "storage account from: credentials-from-credential-key", + "storageKey": "", + }, + }, + { + name: "azure, get azure credentials fails", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/azure", + Config: map[string]string{ + "credentialsFile": "credentials-from-config-map", + }, + }, + }, + getAzureCredentials: func(config map[string]string) (string, string, error) { + return "", "", errors.New("fake error") + }, + credFileStore: new(credmock.FileStore), + expected: map[string]string{}, + expectedErr: "error get azure credentials: fake error", + }, + { + name: "gcp, Credential section not exists in BSL", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/gcp", + Config: map[string]string{ + "credentialsFile": 
"credentials-from-config-map", + }, + }, + }, + getGCPCredentials: func(config map[string]string) string { + return "credentials-from-config-map" + }, + credFileStore: new(credmock.FileStore), + expected: map[string]string{ + "credFile": "credentials-from-config-map", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + getAzureCredentials = tc.getAzureCredentials + getS3Credentials = tc.getS3Credentials + getGCPCredentials = tc.getGCPCredentials + + var fileStore velerocredentials.FileStore + if tc.credFileStore != nil { + tc.credFileStore.On("Path", mock.Anything, mock.Anything).Return(tc.credStorePath, tc.credStoreError) + fileStore = tc.credFileStore + } + + actual, err := getStorageCredentials(&tc.backupLocation, fileStore) + + require.Equal(t, tc.expected, actual) + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + }) + } +} + +func TestGetStorageVariables(t *testing.T) { + testCases := []struct { + name string + backupLocation velerov1api.BackupStorageLocation + repoName string + getS3BucketRegion func(string) (string, error) + getAzureStorageDomain func(map[string]string) string + expected map[string]string + expectedErr string + }{ + { + name: "invalid provider", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "invalid-provider", + }, + }, + expected: map[string]string{}, + expectedErr: "invalid storage provider", + }, + { + name: "aws, ObjectStorage section not exists in BSL, s3Url exist", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/aws", + Config: map[string]string{ + "bucket": "fake-bucket", + "prefix": "fake-prefix", + "region": "fake-region/", + "s3Url": "fake-url", + "insecureSkipTLSVerify": "true", + }, + }, + }, + expected: map[string]string{ + "bucket": "fake-bucket", + "prefix": "fake-prefix/unified-repo/", 
+ "region": "fake-region", + "fspath": "", + "endpoint": "fake-url", + "skipTLSVerify": "true", + }, + }, + { + name: "aws, ObjectStorage section not exists in BSL, s3Url not exist", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/aws", + Config: map[string]string{ + "bucket": "fake-bucket", + "prefix": "fake-prefix", + "insecureSkipTLSVerify": "false", + }, + }, + }, + getS3BucketRegion: func(bucket string) (string, error) { + return "region from bucket: " + bucket, nil + }, + expected: map[string]string{ + "bucket": "fake-bucket", + "prefix": "fake-prefix/unified-repo/", + "region": "region from bucket: fake-bucket", + "fspath": "", + "endpoint": "s3-region from bucket: fake-bucket.amazonaws.com", + "skipTLSVerify": "false", + }, + }, + { + name: "aws, ObjectStorage section not exists in BSL, s3Url not exist, get region fail", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/aws", + Config: map[string]string{}, + }, + }, + getS3BucketRegion: func(bucket string) (string, error) { + return "", errors.New("fake error") + }, + expected: map[string]string{}, + expectedErr: "error get s3 bucket region: fake error", + }, + { + name: "aws, ObjectStorage section exists in BSL, s3Url exist", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/aws", + Config: map[string]string{ + "bucket": "fake-bucket-config", + "prefix": "fake-prefix-config", + "region": "fake-region", + "s3Url": "fake-url", + "insecureSkipTLSVerify": "false", + }, + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ + Bucket: "fake-bucket-object-store", + Prefix: "fake-prefix-object-store", + }, + }, + }, + }, + getS3BucketRegion: func(bucket string) (string, error) { + return "region from bucket: " + bucket, nil + }, + expected: map[string]string{ 
+ "bucket": "fake-bucket-object-store", + "prefix": "fake-prefix-object-store/unified-repo/", + "region": "fake-region", + "fspath": "", + "endpoint": "fake-url", + "skipTLSVerify": "false", + }, + }, + { + name: "azure, ObjectStorage section exists in BSL", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/azure", + Config: map[string]string{ + "bucket": "fake-bucket-config", + "prefix": "fake-prefix-config", + "region": "fake-region", + "fspath": "", + "storageDomain": "fake-domain", + }, + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ + Bucket: "fake-bucket-object-store", + Prefix: "fake-prefix-object-store", + }, + }, + }, + }, + getAzureStorageDomain: func(config map[string]string) string { + return config["storageDomain"] + }, + expected: map[string]string{ + "bucket": "fake-bucket-object-store", + "prefix": "fake-prefix-object-store/unified-repo/", + "region": "fake-region", + "fspath": "", + "storageDomain": "fake-domain", + }, + }, + { + name: "azure, ObjectStorage section not exists in BSL, repo name exists", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/azure", + Config: map[string]string{ + "bucket": "fake-bucket", + "prefix": "fake-prefix", + "region": "fake-region", + "fspath": "", + "storageDomain": "fake-domain", + }, + }, + }, + repoName: "//fake-name//", + getAzureStorageDomain: func(config map[string]string) string { + return config["storageDomain"] + }, + expected: map[string]string{ + "bucket": "fake-bucket", + "prefix": "fake-prefix/unified-repo/fake-name/", + "region": "fake-region", + "fspath": "", + "storageDomain": "fake-domain", + }, + }, + { + name: "fs", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/fs", + Config: map[string]string{ + "fspath": "fake-path", + "prefix": 
"fake-prefix", + }, + }, + }, + expected: map[string]string{ + "fspath": "fake-path", + "bucket": "", + "prefix": "fake-prefix/unified-repo/", + "region": "", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + getS3BucketRegion = tc.getS3BucketRegion + getAzureStorageDomain = tc.getAzureStorageDomain + + actual, err := getStorageVariables(&tc.backupLocation, tc.repoName) + + require.Equal(t, tc.expected, actual) + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + }) + } +} + +func TestGetRepoPassword(t *testing.T) { + testCases := []struct { + name string + getter *credmock.SecretStore + credStoreReturn string + credStoreError error + cached string + expected string + expectedErr string + }{ + { + name: "invalid secret interface", + expectedErr: "invalid credentials interface", + }, + { + name: "error from secret interface", + getter: new(credmock.SecretStore), + credStoreError: errors.New("fake error"), + expectedErr: "error to get password buffer: fake error", + }, + { + name: "secret with whitespace", + getter: new(credmock.SecretStore), + credStoreReturn: " fake-passwor d ", + expected: "fake-passwor d", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var secretStore velerocredentials.SecretStore + if tc.getter != nil { + tc.getter.On("Get", mock.Anything, mock.Anything).Return(tc.credStoreReturn, tc.credStoreError) + secretStore = tc.getter + } + + urp := unifiedRepoProvider{ + credentialGetter: velerocredentials.CredentialGetter{ + FromSecret: secretStore, + }, + } + + password, err := getRepoPassword(urp.credentialGetter.FromSecret, RepoParam{}) + + require.Equal(t, tc.expected, password) + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + }) + } +} + +func TestGetRepoOption(t *testing.T) { + testCases := []struct { + name string + funcTable localFuncTable + 
getRepoPassword func(velerocredentials.SecretStore, RepoParam) (string, error) + getStorageCredentials func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) + getStorageVariables func(*velerov1api.BackupStorageLocation, string) (map[string]string, error) + expected udmrepo.RepoOptions + expectedErr string + }{ + { + name: "get repo password fail", + funcTable: localFuncTable{ + getRepoPassword: func(velerocredentials.SecretStore, RepoParam) (string, error) { + return "", errors.New("fake-error-1") + }, + }, + expectedErr: "error to get repo password: fake-error-1", + }, + { + name: "get storage variable fail", + funcTable: localFuncTable{ + getRepoPassword: func(velerocredentials.SecretStore, RepoParam) (string, error) { + return "fake-password", nil + }, + getStorageVariables: func(*velerov1api.BackupStorageLocation, string) (map[string]string, error) { + return map[string]string{}, errors.New("fake-error-2") + }, + }, + expectedErr: "error to get storage variables: fake-error-2", + }, + { + name: "get storage credentials fail", + funcTable: localFuncTable{ + getRepoPassword: func(velerocredentials.SecretStore, RepoParam) (string, error) { + return "fake-password", nil + }, + getStorageVariables: func(*velerov1api.BackupStorageLocation, string) (map[string]string, error) { + return map[string]string{}, nil + }, + getStorageCredentials: func(*velerov1api.BackupStorageLocation, velerocredentials.FileStore) (map[string]string, error) { + return map[string]string{}, errors.New("fake-error-3") + }, + }, + expectedErr: "error to get repo credentials: fake-error-3", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + funcTable = tc.funcTable + urp := unifiedRepoProvider{} + + password, err := urp.getRepoOption(RepoParam{}) + + require.Equal(t, tc.expected, password) + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + } + }) + } +} diff 
--git a/pkg/repository/udmrepo/repo-option-consts.go b/pkg/repository/udmrepo/repo-option-consts.go new file mode 100644 index 000000000..7cf55d017 --- /dev/null +++ b/pkg/repository/udmrepo/repo-option-consts.go @@ -0,0 +1,58 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package udmrepo + +const ( + StorageTypeS3 = "s3" + StorageTypeAzure = "azure" + StorageTypeFs = "filesystem" + StorageTypeGcs = "gcs" + + GenOptionMaintainMode = "mode" + GenOptionMaintainFull = "full" + GenOptionMaintainQuick = "quick" + + StoreOptionS3KeyId = "accessKeyID" + StoreOptionS3Provider = "providerName" + StoreOptionS3SecretKey = "secretAccessKey" + StoreOptionS3Token = "sessionToken" + StoreOptionS3Endpoint = "endpoint" + StoreOptionS3DisableTls = "doNotUseTLS" + StoreOptionS3DisableTlsVerify = "skipTLSVerify" + + StoreOptionAzureKey = "storageKey" + StoreOptionAzureDomain = "storageDomain" + StoreOptionAzureStorageAccount = "storageAccount" + StoreOptionAzureToken = "sasToken" + + StoreOptionFsPath = "fspath" + + StoreOptionGcsReadonly = "readonly" + + StoreOptionOssBucket = "bucket" + StoreOptionOssRegion = "region" + + StoreOptionCredentialFile = "credFile" + StoreOptionPrefix = "prefix" + StoreOptionPrefixName = "unified-repo" + + ThrottleOptionReadOps = "readOPS" + ThrottleOptionWriteOps = "writeOPS" + ThrottleOptionListOps = "listOPS" + ThrottleOptionUploadBytes = "uploadBytes" + ThrottleOptionDownloadBytes = "downloadBytes" +) diff --git 
a/pkg/repository/udmrepo/repo.go b/pkg/repository/udmrepo/repo.go new file mode 100644 index 000000000..be18a6d17 --- /dev/null +++ b/pkg/repository/udmrepo/repo.go @@ -0,0 +1,171 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package udmrepo + +import ( + "context" + "io" + "time" +) + +type ID string + +// ManifestEntryMetadata is the metadata describing one manifest data +type ManifestEntryMetadata struct { + ID ID // The ID of the manifest data + Length int32 // The data size of the manifest data + Labels map[string]string // Labels saved together with the manifest data + ModTime time.Time // Modified time of the manifest data +} + +type RepoManifest struct { + Payload interface{} // The user data of manifest + Metadata *ManifestEntryMetadata // The metadata data of manifest +} + +type ManifestFilter struct { + Labels map[string]string +} + +const ( + // Below consts descrbe the data type of one object. + // Metadata: This type describes how the data is organized. + // For a file system backup, the Metadata describes a Dir or File. + // For a block backup, the Metadata describes a Disk and its incremental link. 
+ ObjectDataTypeUnknown int = 0 + ObjectDataTypeMetadata int = 1 + ObjectDataTypeData int = 2 + + // Below consts defines the access mode when creating an object for write + ObjectDataAccessModeUnknown int = 0 + ObjectDataAccessModeFile int = 1 + ObjectDataAccessModeBlock int = 2 + + ObjectDataBackupModeUnknown int = 0 + ObjectDataBackupModeFull int = 1 + ObjectDataBackupModeInc int = 2 +) + +// ObjectWriteOptions defines the options when creating an object for write +type ObjectWriteOptions struct { + FullPath string // Full logical path of the object + DataType int // OBJECT_DATA_TYPE_* + Description string // A description of the object, could be empty + Prefix ID // A prefix of the name used to save the object + AccessMode int // OBJECT_DATA_ACCESS_* + BackupMode int // OBJECT_DATA_BACKUP_* +} + +// OwnershipOptions is used to add some access control to the unified repository. +// For example, some privileged operations of the unified repository can be done by the +// repository owner only; the data of a backup may be manipulated by the backup owner +// who created it only. It is optional for a backup repository to support this ownership control. 
+type OwnershipOptions struct { + Username string + DomainName string + FullQualified string +} + +type RepoOptions struct { + // A repository specific string to identify a backup storage, i.e., "s3", "filesystem" + StorageType string + // Backup repository password, if any + RepoPassword string + // A custom path to save the repository's configuration, if any + ConfigFilePath string + // The ownership for the current repository operation + Ownership OwnershipOptions + // Other repository specific options + GeneralOptions map[string]string + // Storage specific options + StorageOptions map[string]string + + // Description of the backup repository + Description string +} + +// BackupRepoService is used to initialize, open or maintain a backup repository +type BackupRepoService interface { + // Create a backup repository or connect to an existing backup repository. + // repoOption: option to the backup repository and the underlying backup storage. + // createNew: indicates whether to create a new or connect to an existing backup repository. + Init(ctx context.Context, repoOption RepoOptions, createNew bool) error + + // Open an backup repository that has been created/connected. + // repoOption: options to open the backup repository and the underlying storage. + Open(ctx context.Context, repoOption RepoOptions) (BackupRepo, error) + + // Periodically called to maintain the backup repository to eliminate redundant data and improve performance. + // repoOption: options to maintain the backup repository. + Maintain(ctx context.Context, repoOption RepoOptions) error +} + +// BackupRepo provides the access to the backup repository +type BackupRepo interface { + // Open an existing object for read. + // id: the object's unified identifier. + OpenObject(ctx context.Context, id ID) (ObjectReader, error) + + // Get a manifest data. 
+ GetManifest(ctx context.Context, id ID, mani *RepoManifest) error + + // Get one or more manifest data that match the given labels + FindManifests(ctx context.Context, filter ManifestFilter) ([]*ManifestEntryMetadata, error) + + // Create a new object and return the object's writer interface. + // return: A unified identifier of the object on success. + NewObjectWriter(ctx context.Context, opt ObjectWriteOptions) ObjectWriter + + // Save a manifest object + PutManifest(ctx context.Context, mani RepoManifest) (ID, error) + + // Delete a manifest object + DeleteManifest(ctx context.Context, id ID) error + + // Flush all the backup repository data + Flush(ctx context.Context) error + + // Get the local time of the backup repository. It may be different from the time of the caller + Time() time.Time + + // Close the backup repository + Close(ctx context.Context) error +} + +type ObjectReader interface { + io.ReadCloser + io.Seeker + + // Length returns the logical size of the object + Length() int64 +} + +type ObjectWriter interface { + io.WriteCloser + + // For some cases, i.e. block incremental, the object is not written sequentially + io.Seeker + + // Periodically called to preserve the state of data written to the repo so far. + // Return a unified identifier that represent the current state. + // An empty ID could be returned on success if the backup repository doesn't support this. + Checkpoint() (ID, error) + + // Wait for the completion of the object write. + // Result returns the object's unified identifier after the write completes. + Result() (ID, error) +} diff --git a/pkg/restic/aws.go b/pkg/restic/aws.go deleted file mode 100644 index d97c5f0b7..000000000 --- a/pkg/restic/aws.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package restic - -const ( - // AWS specific environment variable - awsProfileEnvVar = "AWS_PROFILE" - awsProfileKey = "profile" - awsCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE" -) - -// getS3ResticEnvVars gets the environment variables that restic -// relies on (AWS_PROFILE) based on info in the provided object -// storage location config map. -func getS3ResticEnvVars(config map[string]string) (map[string]string, error) { - result := make(map[string]string) - - if credentialsFile, ok := config[credentialsFileKey]; ok { - result[awsCredentialsFileEnvVar] = credentialsFile - } - - if profile, ok := config[awsProfileKey]; ok { - result[awsProfileEnvVar] = profile - } - - return result, nil -} diff --git a/pkg/restic/common.go b/pkg/restic/common.go index 23c09e558..6e2671625 100644 --- a/pkg/restic/common.go +++ b/pkg/restic/common.go @@ -20,11 +20,9 @@ import ( "context" "fmt" "os" - "strings" "time" "github.com/pkg/errors" - corev1api "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "sigs.k8s.io/controller-runtime/pkg/client" @@ -32,6 +30,7 @@ import ( "github.com/vmware-tanzu/velero/internal/credentials" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/label" + repoconfig "github.com/vmware-tanzu/velero/pkg/repository/config" "github.com/vmware-tanzu/velero/pkg/util/filesystem" ) @@ -50,182 +49,8 @@ const ( // DefaultVolumesToRestic specifies whether restic should be used, by default, to // take backup of all pod volumes. 
DefaultVolumesToRestic = false - - // PVCNameAnnotation is the key for the annotation added to - // pod volume backups when they're for a PVC. - PVCNameAnnotation = "velero.io/pvc-name" - - // VolumesToBackupAnnotation is the annotation on a pod whose mounted volumes - // need to be backed up using restic. - VolumesToBackupAnnotation = "backup.velero.io/backup-volumes" - - // VolumesToExcludeAnnotation is the annotation on a pod whose mounted volumes - // should be excluded from restic backup. - VolumesToExcludeAnnotation = "backup.velero.io/backup-volumes-excludes" - - // credentialsFileKey is the key within a BSL config that is checked to see if - // the BSL is using its own credentials, rather than those in the environment - credentialsFileKey = "credentialsFile" - - // Deprecated. - // - // TODO(2.0): remove - podAnnotationPrefix = "snapshot.velero.io/" ) -// getPodSnapshotAnnotations returns a map, of volume name -> snapshot id, -// of all restic snapshots for this pod. -// TODO(2.0) to remove -// Deprecated: we will stop using pod annotations to record restic snapshot IDs after they're taken, -// therefore we won't need to check if these annotations exist. -func getPodSnapshotAnnotations(obj metav1.Object) map[string]string { - var res map[string]string - - insertSafe := func(k, v string) { - if res == nil { - res = make(map[string]string) - } - res[k] = v - } - - for k, v := range obj.GetAnnotations() { - if strings.HasPrefix(k, podAnnotationPrefix) { - insertSafe(k[len(podAnnotationPrefix):], v) - } - } - - return res -} - -func isPVBMatchPod(pvb *velerov1api.PodVolumeBackup, podName string, namespace string) bool { - return podName == pvb.Spec.Pod.Name && namespace == pvb.Spec.Pod.Namespace -} - -// volumeHasNonRestorableSource checks if the given volume exists in the list of podVolumes -// and returns true if the volume's source is not restorable. This is true for volumes with -// a Projected or DownwardAPI source. 
-func volumeHasNonRestorableSource(volumeName string, podVolumes []corev1api.Volume) bool { - var volume corev1api.Volume - for _, v := range podVolumes { - if v.Name == volumeName { - volume = v - break - } - } - return volume.Projected != nil || volume.DownwardAPI != nil -} - -// GetVolumeBackupsForPod returns a map, of volume name -> snapshot id, -// of the PodVolumeBackups that exist for the provided pod. -func GetVolumeBackupsForPod(podVolumeBackups []*velerov1api.PodVolumeBackup, pod *corev1api.Pod, sourcePodNs string) map[string]string { - volumes := make(map[string]string) - - for _, pvb := range podVolumeBackups { - if !isPVBMatchPod(pvb, pod.GetName(), sourcePodNs) { - continue - } - - // skip PVBs without a snapshot ID since there's nothing - // to restore (they could be failed, or for empty volumes). - if pvb.Status.SnapshotID == "" { - continue - } - - // If the volume came from a projected or DownwardAPI source, skip its restore. - // This allows backups affected by https://github.com/vmware-tanzu/velero/issues/3863 - // or https://github.com/vmware-tanzu/velero/issues/4053 to be restored successfully. - if volumeHasNonRestorableSource(pvb.Spec.Volume, pod.Spec.Volumes) { - continue - } - - volumes[pvb.Spec.Volume] = pvb.Status.SnapshotID - } - - if len(volumes) > 0 { - return volumes - } - - return getPodSnapshotAnnotations(pod) -} - -// GetVolumesToBackup returns a list of volume names to backup for -// the provided pod. -// Deprecated: Use GetPodVolumesUsingRestic instead. 
-func GetVolumesToBackup(obj metav1.Object) []string { - annotations := obj.GetAnnotations() - if annotations == nil { - return nil - } - - backupsValue := annotations[VolumesToBackupAnnotation] - if backupsValue == "" { - return nil - } - - return strings.Split(backupsValue, ",") -} - -func getVolumesToExclude(obj metav1.Object) []string { - annotations := obj.GetAnnotations() - if annotations == nil { - return nil - } - - return strings.Split(annotations[VolumesToExcludeAnnotation], ",") -} - -func contains(list []string, k string) bool { - for _, i := range list { - if i == k { - return true - } - } - return false -} - -// GetPodVolumesUsingRestic returns a list of volume names to backup for the provided pod. -func GetPodVolumesUsingRestic(pod *corev1api.Pod, defaultVolumesToRestic bool) []string { - if !defaultVolumesToRestic { - return GetVolumesToBackup(pod) - } - - volsToExclude := getVolumesToExclude(pod) - podVolumes := []string{} - for _, pv := range pod.Spec.Volumes { - // cannot backup hostpath volumes as they are not mounted into /var/lib/kubelet/pods - // and therefore not accessible to the restic daemon set. - if pv.HostPath != nil { - continue - } - // don't backup volumes mounting secrets. Secrets will be backed up separately. - if pv.Secret != nil { - continue - } - // don't backup volumes mounting config maps. Config maps will be backed up separately. - if pv.ConfigMap != nil { - continue - } - // don't backup volumes mounted as projected volumes, all data in those come from kube state. - if pv.Projected != nil { - continue - } - // don't backup DownwardAPI volumes, all data in those come from kube state. - if pv.DownwardAPI != nil { - continue - } - // don't backup volumes that are included in the exclude list. - if contains(volsToExclude, pv.Name) { - continue - } - // don't include volumes that mount the default service account token. 
- if strings.HasPrefix(pv.Name, "default-token") { - continue - } - podVolumes = append(podVolumes, pv.Name) - } - return podVolumes -} - // SnapshotIdentifier uniquely identifies a restic snapshot // taken by Velero. type SnapshotIdentifier struct { @@ -322,24 +147,24 @@ func CmdEnv(backupLocation *velerov1api.BackupStorageLocation, credentialFileSto if err != nil { return []string{}, errors.WithStack(err) } - config[credentialsFileKey] = credsFile + config[repoconfig.CredentialsFileKey] = credsFile } - backendType := getBackendType(backupLocation.Spec.Provider) + backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider) switch backendType { - case AWSBackend: - customEnv, err = getS3ResticEnvVars(config) + case repoconfig.AWSBackend: + customEnv, err = repoconfig.GetS3ResticEnvVars(config) if err != nil { return []string{}, err } - case AzureBackend: - customEnv, err = getAzureResticEnvVars(config) + case repoconfig.AzureBackend: + customEnv, err = repoconfig.GetAzureResticEnvVars(config) if err != nil { return []string{}, err } - case GCPBackend: - customEnv, err = getGCPResticEnvVars(config) + case repoconfig.GCPBackend: + customEnv, err = repoconfig.GetGCPResticEnvVars(config) if err != nil { return []string{}, err } diff --git a/pkg/restic/common_test.go b/pkg/restic/common_test.go index 7f3e0c503..fac82f901 100644 --- a/pkg/restic/common_test.go +++ b/pkg/restic/common_test.go @@ -28,212 +28,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" - "github.com/vmware-tanzu/velero/pkg/builder" velerotest "github.com/vmware-tanzu/velero/pkg/test" ) -func TestGetVolumeBackupsForPod(t *testing.T) { - tests := []struct { - name string - podVolumeBackups []*velerov1api.PodVolumeBackup - podVolumes []corev1api.Volume - podAnnotations map[string]string - podName string - sourcePodNs string - expected map[string]string - }{ - { - name: "nil annotations results in no volume backups 
returned", - podAnnotations: nil, - expected: nil, - }, - { - name: "empty annotations results in no volume backups returned", - podAnnotations: make(map[string]string), - expected: nil, - }, - { - name: "pod annotations with no snapshot annotation prefix results in no volume backups returned", - podAnnotations: map[string]string{"foo": "bar"}, - expected: nil, - }, - { - name: "pod annotation with only snapshot annotation prefix, results in volume backup with empty volume key", - podAnnotations: map[string]string{podAnnotationPrefix: "snapshotID"}, - expected: map[string]string{"": "snapshotID"}, - }, - { - name: "pod annotation with snapshot annotation prefix results in volume backup with volume name and snapshot ID", - podAnnotations: map[string]string{podAnnotationPrefix + "volume": "snapshotID"}, - expected: map[string]string{"volume": "snapshotID"}, - }, - { - name: "only pod annotations with snapshot annotation prefix are considered", - podAnnotations: map[string]string{"x": "y", podAnnotationPrefix + "volume1": "snapshot1", podAnnotationPrefix + "volume2": "snapshot2"}, - expected: map[string]string{"volume1": "snapshot1", "volume2": "snapshot2"}, - }, - { - name: "pod annotations are not considered if PVBs are provided", - podVolumeBackups: []*velerov1api.PodVolumeBackup{ - builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(), - builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(), - }, - podName: "TestPod", - sourcePodNs: "TestNS", - podAnnotations: map[string]string{"x": "y", podAnnotationPrefix + "foo": "bar", podAnnotationPrefix + "abc": "123"}, - expected: map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"}, - }, - { - name: "volume backups are returned even if no pod annotations are present", - podVolumeBackups: []*velerov1api.PodVolumeBackup{ - 
builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(), - builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(), - }, - podName: "TestPod", - sourcePodNs: "TestNS", - expected: map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"}, - }, - { - name: "only volumes from PVBs with snapshot IDs are returned", - podVolumeBackups: []*velerov1api.PodVolumeBackup{ - builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(), - builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(), - builder.ForPodVolumeBackup("velero", "pvb-3").PodName("TestPod").PodNamespace("TestNS").Volume("pvbtest3-foo").Result(), - builder.ForPodVolumeBackup("velero", "pvb-4").PodName("TestPod").PodNamespace("TestNS").Volume("pvbtest4-abc").Result(), - }, - podName: "TestPod", - sourcePodNs: "TestNS", - expected: map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"}, - }, - { - name: "only volumes from PVBs for the given pod are returned", - podVolumeBackups: []*velerov1api.PodVolumeBackup{ - builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(), - builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(), - builder.ForPodVolumeBackup("velero", "pvb-3").PodName("TestAnotherPod").SnapshotID("snapshot3").Volume("pvbtest3-xyz").Result(), - }, - podName: "TestPod", - sourcePodNs: "TestNS", - expected: map[string]string{"pvbtest1-foo": "snapshot1", "pvbtest2-abc": "snapshot2"}, - }, - { - name: "only volumes from PVBs which match the pod name 
and source pod namespace are returned", - podVolumeBackups: []*velerov1api.PodVolumeBackup{ - builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvbtest1-foo").Result(), - builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestAnotherPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvbtest2-abc").Result(), - builder.ForPodVolumeBackup("velero", "pvb-3").PodName("TestPod").PodNamespace("TestAnotherNS").SnapshotID("snapshot3").Volume("pvbtest3-xyz").Result(), - }, - podName: "TestPod", - sourcePodNs: "TestNS", - expected: map[string]string{"pvbtest1-foo": "snapshot1"}, - }, - { - name: "volumes from PVBs that correspond to a pod volume from a projected source are not returned", - podVolumeBackups: []*velerov1api.PodVolumeBackup{ - builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvb-non-projected").Result(), - builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvb-projected").Result(), - }, - podVolumes: []corev1api.Volume{ - { - Name: "pvb-non-projected", - VolumeSource: corev1api.VolumeSource{ - PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{}, - }, - }, - { - Name: "pvb-projected", - VolumeSource: corev1api.VolumeSource{ - Projected: &corev1api.ProjectedVolumeSource{}, - }, - }, - }, - podName: "TestPod", - sourcePodNs: "TestNS", - expected: map[string]string{"pvb-non-projected": "snapshot1"}, - }, - { - name: "volumes from PVBs that correspond to a pod volume from a DownwardAPI source are not returned", - podVolumeBackups: []*velerov1api.PodVolumeBackup{ - builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvb-non-downwardapi").Result(), - builder.ForPodVolumeBackup("velero", 
"pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvb-downwardapi").Result(), - }, - podVolumes: []corev1api.Volume{ - { - Name: "pvb-non-downwardapi", - VolumeSource: corev1api.VolumeSource{ - PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{}, - }, - }, - { - Name: "pvb-downwardapi", - VolumeSource: corev1api.VolumeSource{ - DownwardAPI: &corev1api.DownwardAPIVolumeSource{}, - }, - }, - }, - podName: "TestPod", - sourcePodNs: "TestNS", - expected: map[string]string{"pvb-non-downwardapi": "snapshot1"}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - pod := &corev1api.Pod{} - pod.Annotations = test.podAnnotations - pod.Name = test.podName - pod.Spec.Volumes = test.podVolumes - - res := GetVolumeBackupsForPod(test.podVolumeBackups, pod, test.sourcePodNs) - assert.Equal(t, test.expected, res) - }) - } -} - -func TestGetVolumesToBackup(t *testing.T) { - tests := []struct { - name string - annotations map[string]string - expected []string - }{ - { - name: "nil annotations", - annotations: nil, - expected: nil, - }, - { - name: "no volumes to backup", - annotations: map[string]string{"foo": "bar"}, - expected: nil, - }, - { - name: "one volume to backup", - annotations: map[string]string{"foo": "bar", VolumesToBackupAnnotation: "volume-1"}, - expected: []string{"volume-1"}, - }, - { - name: "multiple volumes to backup", - annotations: map[string]string{"foo": "bar", VolumesToBackupAnnotation: "volume-1,volume-2,volume-3"}, - expected: []string{"volume-1", "volume-2", "volume-3"}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - pod := &corev1api.Pod{} - pod.Annotations = test.annotations - - res := GetVolumesToBackup(pod) - - // sort to ensure good compare of slices - sort.Strings(test.expected) - sort.Strings(res) - - assert.Equal(t, test.expected, res) - }) - } -} - func TestGetSnapshotsInBackup(t *testing.T) { tests := []struct { name string @@ 
-419,410 +216,3 @@ func TestTempCACertFile(t *testing.T) { os.Remove(fileName) } - -func TestGetPodVolumesUsingRestic(t *testing.T) { - testCases := []struct { - name string - pod *corev1api.Pod - expected []string - defaultVolumesToRestic bool - }{ - { - name: "should get PVs from VolumesToBackupAnnotation when defaultVolumesToRestic is false", - defaultVolumesToRestic: false, - pod: &corev1api.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - VolumesToBackupAnnotation: "resticPV1,resticPV2,resticPV3", - }, - }, - }, - expected: []string{"resticPV1", "resticPV2", "resticPV3"}, - }, - { - name: "should get all pod volumes when defaultVolumesToRestic is true and no PVs are excluded", - defaultVolumesToRestic: true, - pod: &corev1api.Pod{ - Spec: corev1api.PodSpec{ - Volumes: []corev1api.Volume{ - // Restic Volumes - {Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"}, - }, - }, - }, - expected: []string{"resticPV1", "resticPV2", "resticPV3"}, - }, - { - name: "should get all pod volumes except ones excluded when defaultVolumesToRestic is true", - defaultVolumesToRestic: true, - pod: &corev1api.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3", - }, - }, - Spec: corev1api.PodSpec{ - Volumes: []corev1api.Volume{ - // Restic Volumes - {Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"}, - /// Excluded from restic through annotation - {Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"}, - }, - }, - }, - expected: []string{"resticPV1", "resticPV2", "resticPV3"}, - }, - { - name: "should exclude default service account token from restic backup", - defaultVolumesToRestic: true, - pod: &corev1api.Pod{ - Spec: corev1api.PodSpec{ - Volumes: []corev1api.Volume{ - // Restic Volumes - {Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"}, - /// Excluded from restic because colume mounting default service account token 
- {Name: "default-token-5xq45"}, - }, - }, - }, - expected: []string{"resticPV1", "resticPV2", "resticPV3"}, - }, - { - name: "should exclude host path volumes from restic backups", - defaultVolumesToRestic: true, - pod: &corev1api.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3", - }, - }, - Spec: corev1api.PodSpec{ - Volumes: []corev1api.Volume{ - // Restic Volumes - {Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"}, - /// Excluded from restic through annotation - {Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"}, - // Excluded from restic because hostpath - {Name: "hostPath1", VolumeSource: corev1api.VolumeSource{HostPath: &corev1api.HostPathVolumeSource{Path: "/hostpathVol"}}}, - }, - }, - }, - expected: []string{"resticPV1", "resticPV2", "resticPV3"}, - }, - { - name: "should exclude volumes mounting secrets", - defaultVolumesToRestic: true, - pod: &corev1api.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3", - }, - }, - Spec: corev1api.PodSpec{ - Volumes: []corev1api.Volume{ - // Restic Volumes - {Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"}, - /// Excluded from restic through annotation - {Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"}, - // Excluded from restic because hostpath - {Name: "superSecret", VolumeSource: corev1api.VolumeSource{Secret: &corev1api.SecretVolumeSource{SecretName: "super-secret"}}}, - }, - }, - }, - expected: []string{"resticPV1", "resticPV2", "resticPV3"}, - }, - { - name: "should exclude volumes mounting config maps", - defaultVolumesToRestic: true, - pod: &corev1api.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3", - }, - }, - Spec: corev1api.PodSpec{ - Volumes: []corev1api.Volume{ - 
// Restic Volumes - {Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"}, - /// Excluded from restic through annotation - {Name: "nonResticPV1"}, {Name: "nonResticPV2"}, {Name: "nonResticPV3"}, - // Excluded from restic because hostpath - {Name: "appCOnfig", VolumeSource: corev1api.VolumeSource{ConfigMap: &corev1api.ConfigMapVolumeSource{LocalObjectReference: corev1api.LocalObjectReference{Name: "app-config"}}}}, - }, - }, - }, - expected: []string{"resticPV1", "resticPV2", "resticPV3"}, - }, - { - name: "should exclude projected volumes", - defaultVolumesToRestic: true, - pod: &corev1api.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3", - }, - }, - Spec: corev1api.PodSpec{ - Volumes: []corev1api.Volume{ - {Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"}, - { - Name: "projected", - VolumeSource: corev1api.VolumeSource{ - Projected: &corev1api.ProjectedVolumeSource{ - Sources: []corev1api.VolumeProjection{{ - Secret: &corev1api.SecretProjection{ - LocalObjectReference: corev1api.LocalObjectReference{}, - Items: nil, - Optional: nil, - }, - DownwardAPI: nil, - ConfigMap: nil, - ServiceAccountToken: nil, - }}, - DefaultMode: nil, - }, - }, - }, - }, - }, - }, - expected: []string{"resticPV1", "resticPV2", "resticPV3"}, - }, - { - name: "should exclude DownwardAPI volumes", - defaultVolumesToRestic: true, - pod: &corev1api.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{ - VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3", - }, - }, - Spec: corev1api.PodSpec{ - Volumes: []corev1api.Volume{ - {Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"}, - { - Name: "downwardAPI", - VolumeSource: corev1api.VolumeSource{ - DownwardAPI: &corev1api.DownwardAPIVolumeSource{ - Items: []corev1api.DownwardAPIVolumeFile{ - { - Path: "labels", - FieldRef: &corev1api.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: 
"metadata.labels", - }, - }, - }, - }, - }, - }, - }, - }, - }, - expected: []string{"resticPV1", "resticPV2", "resticPV3"}, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - actual := GetPodVolumesUsingRestic(tc.pod, tc.defaultVolumesToRestic) - - sort.Strings(tc.expected) - sort.Strings(actual) - assert.Equal(t, tc.expected, actual) - }) - } -} - -func TestIsPVBMatchPod(t *testing.T) { - testCases := []struct { - name string - pvb velerov1api.PodVolumeBackup - podName string - sourcePodNs string - expected bool - }{ - { - name: "should match PVB and pod", - pvb: velerov1api.PodVolumeBackup{ - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{ - Name: "matching-pod", - Namespace: "matching-namespace", - }, - }, - }, - podName: "matching-pod", - sourcePodNs: "matching-namespace", - expected: true, - }, - { - name: "should not match PVB and pod, pod name mismatch", - pvb: velerov1api.PodVolumeBackup{ - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{ - Name: "matching-pod", - Namespace: "matching-namespace", - }, - }, - }, - podName: "not-matching-pod", - sourcePodNs: "matching-namespace", - expected: false, - }, - { - name: "should not match PVB and pod, pod namespace mismatch", - pvb: velerov1api.PodVolumeBackup{ - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{ - Name: "matching-pod", - Namespace: "matching-namespace", - }, - }, - }, - podName: "matching-pod", - sourcePodNs: "not-matching-namespace", - expected: false, - }, - { - name: "should not match PVB and pod, pod name and namespace mismatch", - pvb: velerov1api.PodVolumeBackup{ - Spec: velerov1api.PodVolumeBackupSpec{ - Pod: corev1api.ObjectReference{ - Name: "matching-pod", - Namespace: "matching-namespace", - }, - }, - }, - podName: "not-matching-pod", - sourcePodNs: "not-matching-namespace", - expected: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - actual 
:= isPVBMatchPod(&tc.pvb, tc.podName, tc.sourcePodNs) - assert.Equal(t, tc.expected, actual) - }) - - } -} - -func TestVolumeHasNonRestorableSource(t *testing.T) { - testCases := []struct { - name string - volumeName string - podVolumes []corev1api.Volume - expected bool - }{ - { - name: "volume name not in list of volumes", - volumeName: "missing-volume", - podVolumes: []corev1api.Volume{ - { - Name: "restorable", - VolumeSource: corev1api.VolumeSource{ - PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{}, - }, - }, - { - Name: "projected", - VolumeSource: corev1api.VolumeSource{ - Projected: &corev1api.ProjectedVolumeSource{}, - }, - }, - { - Name: "downwardapi", - VolumeSource: corev1api.VolumeSource{ - DownwardAPI: &corev1api.DownwardAPIVolumeSource{}, - }, - }, - }, - expected: false, - }, - { - name: "volume name in list of volumes but not projected or DownwardAPI", - volumeName: "restorable", - podVolumes: []corev1api.Volume{ - { - Name: "restorable", - VolumeSource: corev1api.VolumeSource{ - PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{}, - }, - }, - { - Name: "projected", - VolumeSource: corev1api.VolumeSource{ - Projected: &corev1api.ProjectedVolumeSource{}, - }, - }, - { - Name: "downwardapi", - VolumeSource: corev1api.VolumeSource{ - DownwardAPI: &corev1api.DownwardAPIVolumeSource{}, - }, - }, - }, - expected: false, - }, - { - name: "volume name in list of volumes and projected", - volumeName: "projected", - podVolumes: []corev1api.Volume{ - { - Name: "restorable", - VolumeSource: corev1api.VolumeSource{ - PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{}, - }, - }, - { - Name: "projected", - VolumeSource: corev1api.VolumeSource{ - Projected: &corev1api.ProjectedVolumeSource{}, - }, - }, - { - Name: "downwardapi", - VolumeSource: corev1api.VolumeSource{ - DownwardAPI: &corev1api.DownwardAPIVolumeSource{}, - }, - }, - }, - expected: true, - }, - { - name: "volume name in list of volumes and 
is a DownwardAPI volume", - volumeName: "downwardapi", - podVolumes: []corev1api.Volume{ - { - Name: "restorable", - VolumeSource: corev1api.VolumeSource{ - PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{}, - }, - }, - { - Name: "projected", - VolumeSource: corev1api.VolumeSource{ - Projected: &corev1api.ProjectedVolumeSource{}, - }, - }, - { - Name: "downwardapi", - VolumeSource: corev1api.VolumeSource{ - DownwardAPI: &corev1api.DownwardAPIVolumeSource{}, - }, - }, - }, - expected: true, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - actual := volumeHasNonRestorableSource(tc.volumeName, tc.podVolumes) - assert.Equal(t, tc.expected, actual) - }) - - } -} diff --git a/pkg/restic/mocks/repository_manager.go b/pkg/restic/mocks/repository_manager.go index b164cb3e0..553370674 100644 --- a/pkg/restic/mocks/repository_manager.go +++ b/pkg/restic/mocks/repository_manager.go @@ -23,6 +23,7 @@ import ( restic "github.com/vmware-tanzu/velero/pkg/restic" v1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" + "github.com/vmware-tanzu/velero/pkg/podvolume" ) // RepositoryManager is an autogenerated mock type for the RepositoryManager type @@ -31,11 +32,11 @@ type RepositoryManager struct { } // ConnectToRepo provides a mock function with given fields: repo -func (_m *RepositoryManager) ConnectToRepo(repo *v1.ResticRepository) error { +func (_m *RepositoryManager) ConnectToRepo(repo *v1.BackupRepository) error { ret := _m.Called(repo) var r0 error - if rf, ok := ret.Get(0).(func(*v1.ResticRepository) error); ok { + if rf, ok := ret.Get(0).(func(*v1.BackupRepository) error); ok { r0 = rf(repo) } else { r0 = ret.Error(0) @@ -59,11 +60,11 @@ func (_m *RepositoryManager) Forget(_a0 context.Context, _a1 restic.SnapshotIden } // InitRepo provides a mock function with given fields: repo -func (_m *RepositoryManager) InitRepo(repo *v1.ResticRepository) error { +func (_m *RepositoryManager) InitRepo(repo *v1.BackupRepository) 
error { ret := _m.Called(repo) var r0 error - if rf, ok := ret.Get(0).(func(*v1.ResticRepository) error); ok { + if rf, ok := ret.Get(0).(func(repository *v1.BackupRepository) error); ok { r0 = rf(repo) } else { r0 = ret.Error(0) @@ -73,15 +74,15 @@ func (_m *RepositoryManager) InitRepo(repo *v1.ResticRepository) error { } // NewBackupper provides a mock function with given fields: _a0, _a1 -func (_m *RepositoryManager) NewBackupper(_a0 context.Context, _a1 *v1.Backup) (restic.Backupper, error) { +func (_m *RepositoryManager) NewBackupper(_a0 context.Context, _a1 *v1.Backup) (podvolume.Backupper, error) { ret := _m.Called(_a0, _a1) - var r0 restic.Backupper - if rf, ok := ret.Get(0).(func(context.Context, *v1.Backup) restic.Backupper); ok { + var r0 podvolume.Backupper + if rf, ok := ret.Get(0).(func(context.Context, *v1.Backup) podvolume.Backupper); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(restic.Backupper) + r0 = ret.Get(0).(podvolume.Backupper) } } @@ -96,15 +97,15 @@ func (_m *RepositoryManager) NewBackupper(_a0 context.Context, _a1 *v1.Backup) ( } // NewRestorer provides a mock function with given fields: _a0, _a1 -func (_m *RepositoryManager) NewRestorer(_a0 context.Context, _a1 *v1.Restore) (restic.Restorer, error) { +func (_m *RepositoryManager) NewRestorer(_a0 context.Context, _a1 *v1.Restore) (podvolume.Restorer, error) { ret := _m.Called(_a0, _a1) - var r0 restic.Restorer - if rf, ok := ret.Get(0).(func(context.Context, *v1.Restore) restic.Restorer); ok { + var r0 podvolume.Restorer + if rf, ok := ret.Get(0).(func(context.Context, *v1.Restore) podvolume.Restorer); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(restic.Restorer) + r0 = ret.Get(0).(podvolume.Restorer) } } @@ -119,11 +120,11 @@ func (_m *RepositoryManager) NewRestorer(_a0 context.Context, _a1 *v1.Restore) ( } // PruneRepo provides a mock function with given fields: repo -func (_m *RepositoryManager) PruneRepo(repo 
*v1.ResticRepository) error { +func (_m *RepositoryManager) PruneRepo(repo *v1.BackupRepository) error { ret := _m.Called(repo) var r0 error - if rf, ok := ret.Get(0).(func(*v1.ResticRepository) error); ok { + if rf, ok := ret.Get(0).(func(repository *v1.BackupRepository) error); ok { r0 = rf(repo) } else { r0 = ret.Error(0) @@ -133,11 +134,11 @@ func (_m *RepositoryManager) PruneRepo(repo *v1.ResticRepository) error { } // UnlockRepo provides a mock function with given fields: repo -func (_m *RepositoryManager) UnlockRepo(repo *v1.ResticRepository) error { +func (_m *RepositoryManager) UnlockRepo(repo *v1.BackupRepository) error { ret := _m.Called(repo) var r0 error - if rf, ok := ret.Get(0).(func(*v1.ResticRepository) error); ok { + if rf, ok := ret.Get(0).(func(repository *v1.BackupRepository) error); ok { r0 = rf(repo) } else { r0 = ret.Error(0) diff --git a/pkg/restic/repository_manager.go b/pkg/restic/repository_manager.go index f3ff735f9..39961fc02 100644 --- a/pkg/restic/repository_manager.go +++ b/pkg/restic/repository_manager.go @@ -18,16 +18,13 @@ package restic import ( "context" - "fmt" "os" "strconv" "github.com/pkg/errors" "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" - kbclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/vmware-tanzu/velero/internal/credentials" @@ -36,6 +33,9 @@ import ( velerov1client "github.com/vmware-tanzu/velero/pkg/generated/clientset/versioned/typed/velero/v1" velerov1informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions/velero/v1" velerov1listers "github.com/vmware-tanzu/velero/pkg/generated/listers/velero/v1" + "github.com/vmware-tanzu/velero/pkg/podvolume" + "github.com/vmware-tanzu/velero/pkg/repository" + repokey "github.com/vmware-tanzu/velero/pkg/repository/keys" veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec" 
"github.com/vmware-tanzu/velero/pkg/util/filesystem" ) @@ -43,57 +43,45 @@ import ( // RepositoryManager executes commands against restic repositories. type RepositoryManager interface { // InitRepo initializes a repo with the specified name and identifier. - InitRepo(repo *velerov1api.ResticRepository) error + InitRepo(repo *velerov1api.BackupRepository) error // ConnectToRepo runs the 'restic snapshots' command against the // specified repo, and returns an error if it fails. This is // intended to be used to ensure that the repo exists/can be // authenticated to. - ConnectToRepo(repo *velerov1api.ResticRepository) error + ConnectToRepo(repo *velerov1api.BackupRepository) error // PruneRepo deletes unused data from a repo. - PruneRepo(repo *velerov1api.ResticRepository) error + PruneRepo(repo *velerov1api.BackupRepository) error // UnlockRepo removes stale locks from a repo. - UnlockRepo(repo *velerov1api.ResticRepository) error + UnlockRepo(repo *velerov1api.BackupRepository) error // Forget removes a snapshot from the list of // available snapshots in a repo. Forget(context.Context, SnapshotIdentifier) error - BackupperFactory + podvolume.BackupperFactory - RestorerFactory -} - -// BackupperFactory can construct restic backuppers. -type BackupperFactory interface { - // NewBackupper returns a restic backupper for use during a single - // Velero backup. - NewBackupper(context.Context, *velerov1api.Backup) (Backupper, error) -} - -// RestorerFactory can construct restic restorers. -type RestorerFactory interface { - // NewRestorer returns a restic restorer for use during a single - // Velero restore. 
- NewRestorer(context.Context, *velerov1api.Restore) (Restorer, error) + podvolume.RestorerFactory } type repositoryManager struct { namespace string veleroClient clientset.Interface - repoLister velerov1listers.ResticRepositoryLister + repoLister velerov1listers.BackupRepositoryLister repoInformerSynced cache.InformerSynced kbClient kbclient.Client log logrus.FieldLogger - repoLocker *repoLocker - repoEnsurer *repositoryEnsurer + repoLocker *repository.RepoLocker + repoEnsurer *repository.RepositoryEnsurer fileSystem filesystem.Interface ctx context.Context pvcClient corev1client.PersistentVolumeClaimsGetter pvClient corev1client.PersistentVolumesGetter credentialsFileStore credentials.FileStore + podvolume.BackupperFactory + podvolume.RestorerFactory } const ( @@ -111,8 +99,8 @@ func NewRepositoryManager( ctx context.Context, namespace string, veleroClient clientset.Interface, - repoInformer velerov1informers.ResticRepositoryInformer, - repoClient velerov1client.ResticRepositoriesGetter, + repoInformer velerov1informers.BackupRepositoryInformer, + repoClient velerov1client.BackupRepositoriesGetter, kbClient kbclient.Client, pvcClient corev1client.PersistentVolumeClaimsGetter, pvClient corev1client.PersistentVolumesGetter, @@ -131,57 +119,19 @@ func NewRepositoryManager( log: log, ctx: ctx, - repoLocker: newRepoLocker(), - repoEnsurer: newRepositoryEnsurer(repoInformer, repoClient, log), + repoLocker: repository.NewRepoLocker(), + repoEnsurer: repository.NewRepositoryEnsurer(repoInformer, repoClient, log), fileSystem: filesystem.NewFileSystem(), } + rm.BackupperFactory = podvolume.NewBackupperFactory(rm.repoLocker, rm.repoEnsurer, rm.veleroClient, rm.pvcClient, + rm.pvClient, rm.repoInformerSynced, rm.log) + rm.RestorerFactory = podvolume.NewRestorerFactory(rm.repoLocker, rm.repoEnsurer, rm.veleroClient, rm.pvcClient, + rm.repoInformerSynced, rm.log) return rm, nil } -func (rm *repositoryManager) NewBackupper(ctx context.Context, backup *velerov1api.Backup) 
(Backupper, error) { - informer := velerov1informers.NewFilteredPodVolumeBackupInformer( - rm.veleroClient, - backup.Namespace, - 0, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - func(opts *metav1.ListOptions) { - opts.LabelSelector = fmt.Sprintf("%s=%s", velerov1api.BackupUIDLabel, backup.UID) - }, - ) - - b := newBackupper(ctx, rm, rm.repoEnsurer, informer, rm.pvcClient, rm.pvClient, rm.log) - - go informer.Run(ctx.Done()) - if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced, rm.repoInformerSynced) { - return nil, errors.New("timed out waiting for caches to sync") - } - - return b, nil -} - -func (rm *repositoryManager) NewRestorer(ctx context.Context, restore *velerov1api.Restore) (Restorer, error) { - informer := velerov1informers.NewFilteredPodVolumeRestoreInformer( - rm.veleroClient, - restore.Namespace, - 0, - cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, - func(opts *metav1.ListOptions) { - opts.LabelSelector = fmt.Sprintf("%s=%s", velerov1api.RestoreUIDLabel, restore.UID) - }, - ) - - r := newRestorer(ctx, rm, rm.repoEnsurer, informer, rm.pvcClient, rm.log) - - go informer.Run(ctx.Done()) - if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced, rm.repoInformerSynced) { - return nil, errors.New("timed out waiting for cache to sync") - } - - return r, nil -} - -func (rm *repositoryManager) InitRepo(repo *velerov1api.ResticRepository) error { +func (rm *repositoryManager) InitRepo(repo *velerov1api.BackupRepository) error { // restic init requires an exclusive lock rm.repoLocker.LockExclusive(repo.Name) defer rm.repoLocker.UnlockExclusive(repo.Name) @@ -189,7 +139,7 @@ func (rm *repositoryManager) InitRepo(repo *velerov1api.ResticRepository) error return rm.exec(InitCommand(repo.Spec.ResticIdentifier), repo.Spec.BackupStorageLocation) } -func (rm *repositoryManager) ConnectToRepo(repo *velerov1api.ResticRepository) error { +func (rm *repositoryManager) ConnectToRepo(repo *velerov1api.BackupRepository) 
error { // restic snapshots requires a non-exclusive lock rm.repoLocker.Lock(repo.Name) defer rm.repoLocker.Unlock(repo.Name) @@ -204,7 +154,7 @@ func (rm *repositoryManager) ConnectToRepo(repo *velerov1api.ResticRepository) e return rm.exec(snapshotsCmd, repo.Spec.BackupStorageLocation) } -func (rm *repositoryManager) PruneRepo(repo *velerov1api.ResticRepository) error { +func (rm *repositoryManager) PruneRepo(repo *velerov1api.BackupRepository) error { // restic prune requires an exclusive lock rm.repoLocker.LockExclusive(repo.Name) defer rm.repoLocker.UnlockExclusive(repo.Name) @@ -212,7 +162,7 @@ func (rm *repositoryManager) PruneRepo(repo *velerov1api.ResticRepository) error return rm.exec(PruneCommand(repo.Spec.ResticIdentifier), repo.Spec.BackupStorageLocation) } -func (rm *repositoryManager) UnlockRepo(repo *velerov1api.ResticRepository) error { +func (rm *repositoryManager) UnlockRepo(repo *velerov1api.BackupRepository) error { // restic unlock requires a non-exclusive lock rm.repoLocker.Lock(repo.Name) defer rm.repoLocker.Unlock(repo.Name) @@ -242,7 +192,7 @@ func (rm *repositoryManager) Forget(ctx context.Context, snapshot SnapshotIdenti } func (rm *repositoryManager) exec(cmd *Command, backupLocation string) error { - file, err := rm.credentialsFileStore.Path(RepoKeySelector()) + file, err := rm.credentialsFileStore.Path(repokey.RepoKeySelector()) if err != nil { return err } diff --git a/pkg/restore/init_restorehook_pod_action_test.go b/pkg/restore/init_restorehook_pod_action_test.go index f4f8f4dfd..c69d3c23f 100644 --- a/pkg/restore/init_restorehook_pod_action_test.go +++ b/pkg/restore/init_restorehook_pod_action_test.go @@ -90,11 +90,11 @@ func TestInitContainerRestoreHookPodActionExecute(t *testing.T) { PostHooks: []velerov1api.RestoreResourceHook{ { Init: &velerov1api.InitRestoreHook{ - InitContainers: []corev1api.Container{ - *builder.ForContainer("restore-init1", "busy-box"). 
- Command([]string{"foobarbaz"}).Result(), - *builder.ForContainer("restore-init2", "busy-box"). - Command([]string{"foobarbaz"}).Result(), + InitContainers: []runtime.RawExtension{ + builder.ForContainer("restore-init1", "busy-box"). + Command([]string{"foobarbaz"}).ResultRawExtension(), + builder.ForContainer("restore-init2", "busy-box"). + Command([]string{"foobarbaz"}).ResultRawExtension(), }, }, }, diff --git a/pkg/restore/restic_restore_action.go b/pkg/restore/restic_restore_action.go index 91b4a6761..ba9f9cb0a 100644 --- a/pkg/restore/restic_restore_action.go +++ b/pkg/restore/restic_restore_action.go @@ -36,6 +36,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/label" "github.com/vmware-tanzu/velero/pkg/plugin/framework" "github.com/vmware-tanzu/velero/pkg/plugin/velero" + "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/restic" "github.com/vmware-tanzu/velero/pkg/util/kube" ) @@ -96,7 +97,7 @@ func (a *ResticRestoreAction) Execute(input *velero.RestoreItemActionExecuteInpu for i := range podVolumeBackupList.Items { podVolumeBackups = append(podVolumeBackups, &podVolumeBackupList.Items[i]) } - volumeSnapshots := restic.GetVolumeBackupsForPod(podVolumeBackups, &pod, podFromBackup.Namespace) + volumeSnapshots := podvolume.GetVolumeBackupsForPod(podVolumeBackups, &pod, podFromBackup.Namespace) if len(volumeSnapshots) == 0 { log.Debug("No restic backups found for pod") return velero.NewRestoreItemActionExecuteOutput(input.Item), nil diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index 691027c7c..11008b55e 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -59,7 +59,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/plugin/framework" "github.com/vmware-tanzu/velero/pkg/plugin/velero" "github.com/vmware-tanzu/velero/pkg/podexec" - "github.com/vmware-tanzu/velero/pkg/restic" + "github.com/vmware-tanzu/velero/pkg/podvolume" "github.com/vmware-tanzu/velero/pkg/util/boolptr" 
"github.com/vmware-tanzu/velero/pkg/util/collections" "github.com/vmware-tanzu/velero/pkg/util/filesystem" @@ -104,7 +104,7 @@ type kubernetesRestorer struct { discoveryHelper discovery.Helper dynamicFactory client.DynamicFactory namespaceClient corev1.NamespaceInterface - resticRestorerFactory restic.RestorerFactory + resticRestorerFactory podvolume.RestorerFactory resticTimeout time.Duration resourceTerminatingTimeout time.Duration resourcePriorities []string @@ -122,7 +122,7 @@ func NewKubernetesRestorer( dynamicFactory client.DynamicFactory, resourcePriorities []string, namespaceClient corev1.NamespaceInterface, - resticRestorerFactory restic.RestorerFactory, + resticRestorerFactory podvolume.RestorerFactory, resticTimeout time.Duration, resourceTerminatingTimeout time.Duration, logger logrus.FieldLogger, @@ -248,7 +248,7 @@ func (kr *kubernetesRestorer) RestoreWithResolvers( ctx, cancelFunc := go_context.WithTimeout(go_context.Background(), podVolumeTimeout) defer cancelFunc() - var resticRestorer restic.Restorer + var resticRestorer podvolume.Restorer if kr.resticRestorerFactory != nil { resticRestorer, err = kr.resticRestorerFactory.NewRestorer(ctx, req.Restore) if err != nil { @@ -338,7 +338,7 @@ type restoreContext struct { restoreItemActions []framework.RestoreItemResolvedAction itemSnapshotterActions []framework.ItemSnapshotterResolvedAction volumeSnapshotterGetter VolumeSnapshotterGetter - resticRestorer restic.Restorer + resticRestorer podvolume.Restorer resticWaitGroup sync.WaitGroup resticErrs chan error pvsToProvision sets.String @@ -1394,7 +1394,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso // Do not create podvolumerestore when current restore excludes pv/pvc if ctx.resourceIncludesExcludes.ShouldInclude(kuberesource.PersistentVolumeClaims.String()) && ctx.resourceIncludesExcludes.ShouldInclude(kuberesource.PersistentVolumes.String()) && - len(restic.GetVolumeBackupsForPod(ctx.podVolumeBackups, pod, 
originalNamespace)) > 0 { + len(podvolume.GetVolumeBackupsForPod(ctx.podVolumeBackups, pod, originalNamespace)) > 0 { restorePodVolumeBackups(ctx, createdObj, originalNamespace) } } @@ -1549,7 +1549,7 @@ func restorePodVolumeBackups(ctx *restoreContext, createdObj *unstructured.Unstr return } - data := restic.RestoreData{ + data := podvolume.RestoreData{ Restore: ctx.restore, Pod: pod, PodVolumeBackups: ctx.podVolumeBackups, @@ -1631,7 +1631,7 @@ func hasResticBackup(unstructuredPV *unstructured.Unstructured, ctx *restoreCont var found bool for _, pvb := range ctx.podVolumeBackups { - if pvb.Spec.Pod.Namespace == pv.Spec.ClaimRef.Namespace && pvb.GetAnnotations()[restic.PVCNameAnnotation] == pv.Spec.ClaimRef.Name { + if pvb.Spec.Pod.Namespace == pv.Spec.ClaimRef.Namespace && pvb.GetAnnotations()[podvolume.PVCNameAnnotation] == pv.Spec.ClaimRef.Name { found = true break } diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index 7655253d1..404d45e1a 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -48,8 +48,8 @@ import ( velerov1informers "github.com/vmware-tanzu/velero/pkg/generated/informers/externalversions" "github.com/vmware-tanzu/velero/pkg/kuberesource" "github.com/vmware-tanzu/velero/pkg/plugin/velero" - "github.com/vmware-tanzu/velero/pkg/restic" - resticmocks "github.com/vmware-tanzu/velero/pkg/restic/mocks" + "github.com/vmware-tanzu/velero/pkg/podvolume" + uploadermocks "github.com/vmware-tanzu/velero/pkg/podvolume/mocks" "github.com/vmware-tanzu/velero/pkg/test" testutil "github.com/vmware-tanzu/velero/pkg/test" "github.com/vmware-tanzu/velero/pkg/util/kube" @@ -2681,10 +2681,10 @@ func TestRestorePersistentVolumes(t *testing.T) { } type fakeResticRestorerFactory struct { - restorer *resticmocks.Restorer + restorer *uploadermocks.Restorer } -func (f *fakeResticRestorerFactory) NewRestorer(context.Context, *velerov1api.Restore) (restic.Restorer, error) { +func (f *fakeResticRestorerFactory) 
NewRestorer(context.Context, *velerov1api.Restore) (podvolume.Restorer, error) { return f.restorer, nil } @@ -2749,7 +2749,7 @@ func TestRestoreWithRestic(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { h := newHarness(t) - restorer := new(resticmocks.Restorer) + restorer := new(uploadermocks.Restorer) defer restorer.AssertExpectations(t) h.restorer.resticRestorerFactory = &fakeResticRestorerFactory{ restorer: restorer, @@ -2773,7 +2773,7 @@ func TestRestoreWithRestic(t *testing.T) { // the restore process adds these labels before restoring, so we must add them here too otherwise they won't match pod.Labels = map[string]string{"velero.io/backup-name": tc.backup.Name, "velero.io/restore-name": tc.restore.Name} - expectedArgs := restic.RestoreData{ + expectedArgs := podvolume.RestoreData{ Restore: tc.restore, Pod: pod, PodVolumeBackups: tc.podVolumeBackups, diff --git a/pkg/util/kube/predicate.go b/pkg/util/kube/predicate.go new file mode 100644 index 000000000..3073ef881 --- /dev/null +++ b/pkg/util/kube/predicate.go @@ -0,0 +1,47 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kube + +import ( + "reflect" + + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// SpecChangePredicate implements a default update predicate function on Spec change +// As Velero doesn't enable subresource in CRDs, we cannot use the object's metadata.generation field to check the spec change +// More details about the generation field refer to https://github.com/kubernetes-sigs/controller-runtime/blob/v0.12.2/pkg/predicate/predicate.go#L156 +type SpecChangePredicate struct { + predicate.Funcs +} + +func (SpecChangePredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + return false + } + if e.ObjectNew == nil { + return false + } + oldSpec := reflect.ValueOf(e.ObjectOld).Elem().FieldByName("Spec") + // contains no field named "Spec", return false directly + if oldSpec.IsZero() { + return false + } + newSpec := reflect.ValueOf(e.ObjectNew).Elem().FieldByName("Spec") + return !reflect.DeepEqual(oldSpec.Interface(), newSpec.Interface()) +} diff --git a/pkg/util/kube/predicate_test.go b/pkg/util/kube/predicate_test.go new file mode 100644 index 000000000..d1c3be8df --- /dev/null +++ b/pkg/util/kube/predicate_test.go @@ -0,0 +1,180 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kube + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + + velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" +) + +func TestSpecChangePredicate(t *testing.T) { + cases := []struct { + name string + oldObj client.Object + newObj client.Object + changed bool + }{ + { + name: "Contains no spec field", + oldObj: &velerov1.BackupStorageLocation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bsl01", + }, + }, + newObj: &velerov1.BackupStorageLocation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bsl01", + }, + }, + changed: false, + }, + { + name: "ObjectMetas are different, Specs are same", + oldObj: &velerov1.BackupStorageLocation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bsl01", + Annotations: map[string]string{"key1": "value1"}, + }, + Spec: velerov1.BackupStorageLocationSpec{ + Provider: "azure", + }, + }, + newObj: &velerov1.BackupStorageLocation{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bsl01", + Annotations: map[string]string{"key2": "value2"}, + }, + Spec: velerov1.BackupStorageLocationSpec{ + Provider: "azure", + }, + }, + changed: false, + }, + { + name: "Statuses are different, Specs are same", + oldObj: &velerov1.BackupStorageLocation{ + Spec: velerov1.BackupStorageLocationSpec{ + Provider: "azure", + }, + Status: velerov1.BackupStorageLocationStatus{ + Phase: velerov1.BackupStorageLocationPhaseAvailable, + }, + }, + newObj: &velerov1.BackupStorageLocation{ + Spec: velerov1.BackupStorageLocationSpec{ + Provider: "azure", + }, + Status: velerov1.BackupStorageLocationStatus{ + Phase: velerov1.BackupStorageLocationPhaseUnavailable, + }, + }, + changed: false, + }, + { + name: "Specs are different", + oldObj: &velerov1.BackupStorageLocation{ + Spec: velerov1.BackupStorageLocationSpec{ + Provider: "azure", + }, + }, + newObj: 
&velerov1.BackupStorageLocation{ + Spec: velerov1.BackupStorageLocationSpec{ + Provider: "aws", + }, + }, + changed: true, + }, + { + name: "Specs are same", + oldObj: &velerov1.BackupStorageLocation{ + Spec: velerov1.BackupStorageLocationSpec{ + Provider: "azure", + Config: map[string]string{"key": "value"}, + Credential: &corev1api.SecretKeySelector{ + LocalObjectReference: corev1api.LocalObjectReference{ + Name: "secret", + }, + Key: "credential", + }, + StorageType: velerov1.StorageType{ + ObjectStorage: &velerov1.ObjectStorageLocation{ + Bucket: "bucket1", + Prefix: "prefix", + CACert: []byte{'a'}, + }, + }, + Default: true, + AccessMode: velerov1.BackupStorageLocationAccessModeReadWrite, + BackupSyncPeriod: &metav1.Duration{ + Duration: 1 * time.Minute, + }, + ValidationFrequency: &metav1.Duration{ + Duration: 1 * time.Minute, + }, + }, + }, + newObj: &velerov1.BackupStorageLocation{ + Spec: velerov1.BackupStorageLocationSpec{ + Provider: "azure", + Config: map[string]string{"key": "value"}, + Credential: &corev1api.SecretKeySelector{ + LocalObjectReference: corev1api.LocalObjectReference{ + Name: "secret", + }, + Key: "credential", + }, + StorageType: velerov1.StorageType{ + ObjectStorage: &velerov1.ObjectStorageLocation{ + Bucket: "bucket1", + Prefix: "prefix", + CACert: []byte{'a'}, + }, + }, + Default: true, + AccessMode: velerov1.BackupStorageLocationAccessModeReadWrite, + BackupSyncPeriod: &metav1.Duration{ + Duration: 1 * time.Minute, + }, + ValidationFrequency: &metav1.Duration{ + Duration: 1 * time.Minute, + }, + }, + }, + changed: false, + }, + } + + predicate := SpecChangePredicate{} + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + changed := predicate.Update(event.UpdateEvent{ + ObjectOld: c.oldObj, + ObjectNew: c.newObj, + }) + assert.Equal(t, c.changed, changed) + }) + } +} diff --git a/pkg/util/kube/utils.go b/pkg/util/kube/utils.go index 24b2ef6c7..bf7ac0011 100644 --- a/pkg/util/kube/utils.go +++ b/pkg/util/kube/utils.go 
@@ -34,15 +34,20 @@ import ( "k8s.io/apimachinery/pkg/util/wait" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/vmware-tanzu/velero/pkg/util/filesystem" ) // These annotations are taken from the Kubernetes persistent volume/persistent volume claim controller. // They cannot be directly importing because they are part of the kubernetes/kubernetes package, and importing that package is unsupported. // Their values are well-known and slow changing. They're duplicated here as constants to provide compile-time checking. // Originals can be found in kubernetes/kubernetes/pkg/controller/volume/persistentvolume/util/util.go. -const KubeAnnBindCompleted = "pv.kubernetes.io/bind-completed" -const KubeAnnBoundByController = "pv.kubernetes.io/bound-by-controller" -const KubeAnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by" +const ( + KubeAnnBindCompleted = "pv.kubernetes.io/bind-completed" + KubeAnnBoundByController = "pv.kubernetes.io/bound-by-controller" + KubeAnnDynamicallyProvisioned = "pv.kubernetes.io/provisioned-by" + KubeAnnMigratedTo = "pv.kubernetes.io/migrated-to" +) // NamespaceAndName returns a string in the format / func NamespaceAndName(objMeta metav1.Object) string { @@ -163,6 +168,9 @@ func GetVolumeDirectory(ctx context.Context, log logrus.FieldLogger, pod *corev1 return pvc.Spec.VolumeName, nil } +// isProvisionedByCSI function checks whether this is a CSI PV by annotation. +// Either "pv.kubernetes.io/provisioned-by" or "pv.kubernetes.io/migrated-to" indicates +// PV is provisioned by CSI. 
func isProvisionedByCSI(log logrus.FieldLogger, pv *corev1api.PersistentVolume, kbClient client.Client) (bool, error) { if pv.Spec.CSI != nil { return true, nil @@ -171,14 +179,15 @@ func isProvisionedByCSI(log logrus.FieldLogger, pv *corev1api.PersistentVolume, // Refer to https://github.com/vmware-tanzu/velero/issues/4496 for more details if pv.Annotations != nil { driverName := pv.Annotations[KubeAnnDynamicallyProvisioned] - if len(driverName) > 0 { + migratedDriver := pv.Annotations[KubeAnnMigratedTo] + if len(driverName) > 0 || len(migratedDriver) > 0 { list := &storagev1api.CSIDriverList{} if err := kbClient.List(context.TODO(), list); err != nil { return false, err } for _, driver := range list.Items { - if driverName == driver.Name { - log.Debugf("the annotation %s=%s indicates the volume is provisioned by a CSI driver", KubeAnnDynamicallyProvisioned, driverName) + if driverName == driver.Name || migratedDriver == driver.Name { + log.Debugf("the annotation %s or %s equals to %s indicates the volume is provisioned by a CSI driver", KubeAnnDynamicallyProvisioned, KubeAnnMigratedTo, driverName) return true, nil } } @@ -187,6 +196,21 @@ func isProvisionedByCSI(log logrus.FieldLogger, pv *corev1api.PersistentVolume, return false, nil } +// SinglePathMatch function will be called by PVB and PVR controller to check whether pass-in volume path is valid. +// Check whether there is only one match by the path's pattern (/host_pods/%s/volumes/*/volume_name/[mount|]). +func SinglePathMatch(path string, fs filesystem.Interface, log logrus.FieldLogger) (string, error) { + matches, err := fs.Glob(path) + if err != nil { + return "", errors.WithStack(err) + } + if len(matches) != 1 { + return "", errors.Errorf("expected one matching path: %s, got %d", path, len(matches)) + } + + log.Debugf("This is a valid volume path: %s.", matches[0]) + return matches[0], nil +} + // IsV1CRDReady checks a v1 CRD to see if it's ready, with both the Established and NamesAccepted conditions. 
func IsV1CRDReady(crd *apiextv1.CustomResourceDefinition) bool { var isEstablished, namesAccepted bool diff --git a/pkg/util/kube/utils_test.go b/pkg/util/kube/utils_test.go index 4a6db6069..178fa425f 100644 --- a/pkg/util/kube/utils_test.go +++ b/pkg/util/kube/utils_test.go @@ -197,6 +197,13 @@ func TestGetVolumeDirectorySuccess(t *testing.T) { pv: builder.ForPersistentVolume("a-pv").ObjectMeta(builder.WithAnnotations(KubeAnnDynamicallyProvisioned, "csi.test.com")).Result(), want: "a-pv/mount", }, + { + name: "Volume with CSI annotation 'pv.kubernetes.io/migrated-to' appends '/mount' to the volume name", + pod: builder.ForPod("ns-1", "my-pod").Volumes(builder.ForVolume("my-vol").PersistentVolumeClaimSource("my-pvc").Result()).Result(), + pvc: builder.ForPersistentVolumeClaim("ns-1", "my-pvc").VolumeName("a-pv").Result(), + pv: builder.ForPersistentVolume("a-pv").ObjectMeta(builder.WithAnnotations(KubeAnnMigratedTo, "csi.test.com")).Result(), + want: "a-pv/mount", + }, } csiDriver := storagev1api.CSIDriver{ @@ -425,3 +432,13 @@ func TestIsCRDReady(t *testing.T) { _, err = IsCRDReady(obj) assert.NotNil(t, err) } + +func TestSinglePathMatch(t *testing.T) { + fakeFS := velerotest.NewFakeFileSystem() + fakeFS.MkdirAll("testDir1/subpath", 0755) + fakeFS.MkdirAll("testDir2/subpath", 0755) + + _, err := SinglePathMatch("./*/subpath", fakeFS, logrus.StandardLogger()) + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "expected one matching path") +} diff --git a/pkg/util/ownership/backup_owner.go b/pkg/util/ownership/backup_owner.go new file mode 100644 index 000000000..078c799dd --- /dev/null +++ b/pkg/util/ownership/backup_owner.go @@ -0,0 +1,42 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ownership
+
+import "github.com/vmware-tanzu/velero/pkg/repository/udmrepo"
+
+const (
+	defaultOwnerUsername = "default"
+	defaultOwnerDomain   = "default"
+)
+
+// GetBackupOwner returns the owner used by uploaders when saving a snapshot or
+// opening the unified repository. At present, use the default owner only
+func GetBackupOwner() udmrepo.OwnershipOptions {
+	return udmrepo.OwnershipOptions{
+		Username:   defaultOwnerUsername,
+		DomainName: defaultOwnerDomain,
+	}
+}
+
+// GetRepositoryOwner returns the owner used to create/connect the unified repository.
+// At present, use the default owner only
+func GetRepositoryOwner() udmrepo.OwnershipOptions {
+	return udmrepo.OwnershipOptions{
+		Username:   defaultOwnerUsername,
+		DomainName: defaultOwnerDomain,
+	}
+}
diff --git a/site/content/community/_index.md b/site/content/community/_index.md
index 8680073fe..b243ce1d9 100644
--- a/site/content/community/_index.md
+++ b/site/content/community/_index.md
@@ -14,7 +14,7 @@ You can follow the work we do, see our milestones, and our backlog on our [GitHu
 * Follow us on Twitter at [@projectvelero](https://twitter.com/projectvelero)
 * Join our Kubernetes Slack channel and talk to over 800 other community members: [#velero](https://kubernetes.slack.com/messages/velero)
 * Join our [Google Group](https://groups.google.com/forum/#!forum/projectvelero) to get updates on the project and invites to community meetings. 
-* Join the Velero community meetings - [Zoom link](https://vmware.zoom.us/j/551441444?pwd=dHJyMWZtdHFPWWFJaTh5TnFuYWMvZz09): +* Join the Velero community meetings - [Zoom link](https://VMware.zoom.us/j/94501971662?pwd=aUxVbWVEWHZSbDh4ZGdGU1cxYUFoZz09): * 1st and 3rd Tuesday at 12PM ET / 9AM PT ([Convert to your time zone](https://dateful.com/convert/est-edt-eastern-time?t=12pm)) * 2nd and 4th Wednesday at 8am China Standard Time / Tuesday 7pm EST (8pm EDT) / Tuesday 4pm PST (5pm PDT) ([Convert to your time zone](https://dateful.com/convert/beijing-china?t=8am)) * Read and comment on the [meeting notes](https://hackmd.io/Jq6F5zqZR7S80CeDWUklkA?view) diff --git a/site/content/docs/main/restore-reference.md b/site/content/docs/main/restore-reference.md index 0511f6512..a91ef960d 100644 --- a/site/content/docs/main/restore-reference.md +++ b/site/content/docs/main/restore-reference.md @@ -73,7 +73,7 @@ The following is an overview of Velero's restore process that starts after you r By default, Velero will restore resources in the following order: * Custom Resource Definitions -* Mamespaces +* Namespaces * StorageClasses * VolumeSnapshotClass * VolumeSnapshotContents diff --git a/site/content/docs/v1.4/resource-filtering.md b/site/content/docs/v1.4/resource-filtering.md index 6c1470e04..0b893c49f 100644 --- a/site/content/docs/v1.4/resource-filtering.md +++ b/site/content/docs/v1.4/resource-filtering.md @@ -24,7 +24,7 @@ Wildcard takes precedence when both a wildcard and specific resource are include * Restore two namespaces and their objects. ```bash - velero restore create --include-namespaces , + velero restore create --include-namespaces , --from-backup ``` ### --include-resources @@ -38,7 +38,7 @@ Wildcard takes precedence when both a wildcard and specific resource are include * Restore all deployments and configmaps in the cluster. 
```bash - velero restore create --include-resources deployments,configmaps + velero restore create --include-resources deployments,configmaps --from-backup ``` * Backup the deployments in a namespace. @@ -72,7 +72,7 @@ Wildcard takes precedence when both a wildcard and specific resource are include * Restore only namespaced resources in the cluster. ```bash - velero restore create --include-cluster-resources=false + velero restore create --include-cluster-resources=false --from-backup ``` * Backup a namespace and include cluster-scoped resources. @@ -107,7 +107,7 @@ Wildcard excludes are ignored. * Exclude two namespaces during a restore. ```bash - velero restore create --exclude-namespaces , + velero restore create --exclude-namespaces , --from-backup ``` ### --exclude-resources diff --git a/site/content/docs/v1.5/resource-filtering.md b/site/content/docs/v1.5/resource-filtering.md index 6c1470e04..0b893c49f 100644 --- a/site/content/docs/v1.5/resource-filtering.md +++ b/site/content/docs/v1.5/resource-filtering.md @@ -24,7 +24,7 @@ Wildcard takes precedence when both a wildcard and specific resource are include * Restore two namespaces and their objects. ```bash - velero restore create --include-namespaces , + velero restore create --include-namespaces , --from-backup ``` ### --include-resources @@ -38,7 +38,7 @@ Wildcard takes precedence when both a wildcard and specific resource are include * Restore all deployments and configmaps in the cluster. ```bash - velero restore create --include-resources deployments,configmaps + velero restore create --include-resources deployments,configmaps --from-backup ``` * Backup the deployments in a namespace. @@ -72,7 +72,7 @@ Wildcard takes precedence when both a wildcard and specific resource are include * Restore only namespaced resources in the cluster. 
```bash - velero restore create --include-cluster-resources=false + velero restore create --include-cluster-resources=false --from-backup ``` * Backup a namespace and include cluster-scoped resources. @@ -107,7 +107,7 @@ Wildcard excludes are ignored. * Exclude two namespaces during a restore. ```bash - velero restore create --exclude-namespaces , + velero restore create --exclude-namespaces , --from-backup ``` ### --exclude-resources diff --git a/site/content/docs/v1.6/resource-filtering.md b/site/content/docs/v1.6/resource-filtering.md index 6c1470e04..0b893c49f 100644 --- a/site/content/docs/v1.6/resource-filtering.md +++ b/site/content/docs/v1.6/resource-filtering.md @@ -24,7 +24,7 @@ Wildcard takes precedence when both a wildcard and specific resource are include * Restore two namespaces and their objects. ```bash - velero restore create --include-namespaces , + velero restore create --include-namespaces , --from-backup ``` ### --include-resources @@ -38,7 +38,7 @@ Wildcard takes precedence when both a wildcard and specific resource are include * Restore all deployments and configmaps in the cluster. ```bash - velero restore create --include-resources deployments,configmaps + velero restore create --include-resources deployments,configmaps --from-backup ``` * Backup the deployments in a namespace. @@ -72,7 +72,7 @@ Wildcard takes precedence when both a wildcard and specific resource are include * Restore only namespaced resources in the cluster. ```bash - velero restore create --include-cluster-resources=false + velero restore create --include-cluster-resources=false --from-backup ``` * Backup a namespace and include cluster-scoped resources. @@ -107,7 +107,7 @@ Wildcard excludes are ignored. * Exclude two namespaces during a restore. 
```bash - velero restore create --exclude-namespaces , + velero restore create --exclude-namespaces , --from-backup ``` ### --exclude-resources diff --git a/site/content/docs/v1.7/resource-filtering.md b/site/content/docs/v1.7/resource-filtering.md index 6c1470e04..0b893c49f 100644 --- a/site/content/docs/v1.7/resource-filtering.md +++ b/site/content/docs/v1.7/resource-filtering.md @@ -24,7 +24,7 @@ Wildcard takes precedence when both a wildcard and specific resource are include * Restore two namespaces and their objects. ```bash - velero restore create --include-namespaces , + velero restore create --include-namespaces , --from-backup ``` ### --include-resources @@ -38,7 +38,7 @@ Wildcard takes precedence when both a wildcard and specific resource are include * Restore all deployments and configmaps in the cluster. ```bash - velero restore create --include-resources deployments,configmaps + velero restore create --include-resources deployments,configmaps --from-backup ``` * Backup the deployments in a namespace. @@ -72,7 +72,7 @@ Wildcard takes precedence when both a wildcard and specific resource are include * Restore only namespaced resources in the cluster. ```bash - velero restore create --include-cluster-resources=false + velero restore create --include-cluster-resources=false --from-backup ``` * Backup a namespace and include cluster-scoped resources. @@ -107,7 +107,7 @@ Wildcard excludes are ignored. * Exclude two namespaces during a restore. 
```bash - velero restore create --exclude-namespaces , + velero restore create --exclude-namespaces , --from-backup ``` ### --exclude-resources diff --git a/site/content/docs/v1.8/resource-filtering.md b/site/content/docs/v1.8/resource-filtering.md index ddcd97e4a..1f2d2133d 100644 --- a/site/content/docs/v1.8/resource-filtering.md +++ b/site/content/docs/v1.8/resource-filtering.md @@ -24,7 +24,7 @@ Wildcard takes precedence when both a wildcard and specific resource are include * Restore two namespaces and their objects. ```bash - velero restore create --include-namespaces , + velero restore create --include-namespaces , --from-backup ``` ### --include-resources @@ -38,7 +38,7 @@ Wildcard takes precedence when both a wildcard and specific resource are include * Restore all deployments and configmaps in the cluster. ```bash - velero restore create --include-resources deployments,configmaps + velero restore create --include-resources deployments,configmaps --from-backup ``` * Backup the deployments in a namespace. @@ -72,7 +72,7 @@ Wildcard takes precedence when both a wildcard and specific resource are include * Restore only namespaced resources in the cluster. ```bash - velero restore create --include-cluster-resources=false + velero restore create --include-cluster-resources=false --from-backup ``` * Backup a namespace and include cluster-scoped resources. @@ -113,7 +113,7 @@ Wildcard excludes are ignored. * Exclude two namespaces during a restore. ```bash - velero restore create --exclude-namespaces , + velero restore create --exclude-namespaces , --from-backup ``` ### --exclude-resources diff --git a/site/content/docs/v1.9/resource-filtering.md b/site/content/docs/v1.9/resource-filtering.md index 02ae3d68d..5c0b34c73 100644 --- a/site/content/docs/v1.9/resource-filtering.md +++ b/site/content/docs/v1.9/resource-filtering.md @@ -26,7 +26,7 @@ Namespaces to include. Default is `*`, all namespaces. * Restore two namespaces and their objects. 
```bash - velero restore create --include-namespaces , + velero restore create --include-namespaces , --from-backup ``` ### --include-resources @@ -42,7 +42,7 @@ Kubernetes resources to include in the backup, formatted as resource.group, such * Restore all deployments and configmaps in the cluster. ```bash - velero restore create --include-resources deployments,configmaps + velero restore create --include-resources deployments,configmaps --from-backup ``` * Backup the deployments in a namespace. @@ -76,7 +76,7 @@ Includes cluster-scoped resources. This option can have three possible values: * Restore only namespaced resources in the cluster. ```bash - velero restore create --include-cluster-resources=false + velero restore create --include-cluster-resources=false --from-backup ``` * Backup a namespace and include cluster-scoped resources. @@ -119,7 +119,7 @@ Namespaces to exclude. * Exclude two namespaces during a restore. ```bash - velero restore create --exclude-namespaces , + velero restore create --exclude-namespaces , --from-backup ``` ### --exclude-resources diff --git a/site/content/docs/v1.9/restore-reference.md b/site/content/docs/v1.9/restore-reference.md index 0511f6512..a91ef960d 100644 --- a/site/content/docs/v1.9/restore-reference.md +++ b/site/content/docs/v1.9/restore-reference.md @@ -73,7 +73,7 @@ The following is an overview of Velero's restore process that starts after you r By default, Velero will restore resources in the following order: * Custom Resource Definitions -* Mamespaces +* Namespaces * StorageClasses * VolumeSnapshotClass * VolumeSnapshotContents